COMPMID-2577: Fuse bias addition and activation in gemm assembly kernels
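
The merge (write-back) stage of the assembly GEMM kernels previously
applied the BLAS-style update out = (alpha * acc) + (beta * out). The
merge specialisations now take a per-column bias pointer, an Activation
descriptor and an "append" flag instead, so bias addition and the
ReLU / bounded-ReLU clamp are fused into the final store.

A minimal scalar sketch of the new merge semantics (illustrative only;
merge_element is a made-up helper, not the vectorised kernel code):

    #include <algorithm>  // std::min, std::max

    // acc: accumulator produced by the GEMM inner kernel
    // out_prev: value already in the output buffer (used in append mode)
    // [minval, maxval]: clamp range derived from the Activation descriptor
    static float merge_element(float acc, float out_prev, float bias_val,
                               bool append, float minval, float maxval) {
        float v = append ? (acc + out_prev)  // append: accumulate onto existing output
                         : (acc + bias_val); // first pass: add the (possibly zero) bias
        return std::min(std::max(v, minval), maxval);
    }

ReLU sets minval to 0, BoundedReLU additionally caps maxval at the
activation parameter, and Activation::None leaves the range at
+/- infinity so the clamp is a no-op.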

Change-Id: I7f52112d2d05b1ea3d3f3d4b19b8eafab05d6c44
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2141
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp b/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp
index e1af2d4..9409646 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp
@@ -28,13 +28,35 @@
 #include <arm_neon.h>
 
 template<>
-inline void MergeResults<8, 6, false>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta) {
+void MergeResults<8, 6, false>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float *bias, Activation act, bool append) {
     const float *inptr = in;
     prefetch_6x(inptr);
     prefetch_6x(inptr + 96);
 
-    float32x4_t av = vdupq_n_f32(alpha);
-    float32x4_t bv = vdupq_n_f32(beta);
+    float nullbias[8];
+    float minval = - std::numeric_limits<float>::infinity();
+    float maxval =   std::numeric_limits<float>::infinity();
+
+    switch(act.type)
+    {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            minval = 0.0f;
+            break;
+    }
+
+    float32x4_t minv = vdupq_n_f32(minval);
+    float32x4_t maxv = vdupq_n_f32(maxval);
+
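+    /* If this is a first (non-append) pass and no bias was supplied, prepare an
+     * all-zero dummy bias so the bias path below can add unconditionally. */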
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (8 * sizeof(float)));
+    }
 
     for (int y=y0; y<ymax; y+=8) {
         float *outptr0 = out + (y * ldout) + x0;
@@ -61,16 +83,12 @@
                 switch ((y + 5) - ymax) {
                     case 4:
                         outptr1 = dummyres;
-                        // fall through
                     case 3:
                         outptr2 = dummyres;
-                        // fall through
                     case 2:
                         outptr3 = dummyres;
-                        // fall through
                     case 1:
                         outptr4 = dummyres;
-                        // fall through
                     case 0:
                         outptr5 = dummyres;
                         break;
@@ -80,24 +98,24 @@
                 }
             }
 
-            if (beta == 0.0f) {
-                /* If beta=0, don't read the original input at all. */
+            if (append) {
+                /* Append mode: Read, add, activate, write. */
 
                 /* For ragged X, manually copy over the valid results. */
                 if ((i+7) >= xmax) {
                     for (int xi=0; xi<8; xi++) {
                         if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]);
+                            *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                             outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 8]);
+                            *outptr1 = std::min(std::max(minval, inptr[xi + 8] + *outptr1), maxval);
                             outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 16]);
+                            *outptr2 = std::min(std::max(minval, inptr[xi + 16] + *outptr2), maxval);
                             outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 24]);
+                            *outptr3 = std::min(std::max(minval, inptr[xi + 24] + *outptr3), maxval);
                             outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 32]);
+                            *outptr4 = std::min(std::max(minval, inptr[xi + 32] + *outptr4), maxval);
                             outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 40]);
+                            *outptr5 = std::min(std::max(minval, inptr[xi + 40] + *outptr5), maxval);
                             outptr5++;
                         }
                     }
@@ -107,69 +125,100 @@
                     __asm __volatile (
                         // Rows 0-1
                         "VLD1.32	{d0-d3},   [%[inptr]]!\n"
+                        "VLD1.32	{d8-d11},  [%[outptr0]]\n"
                         "VLD1.32	{d4-d7},   [%[inptr]]!\n"
+                        "VLD1.32	{d12-d15}, [%[outptr1]]\n"
 
-                        "VMUL.f32	q4, q0, %q[av]\n"
+                        "VADD.f32	q4, q4, q0\n"
                         ASM_PREFETCH("[%[inptr], #352]")
-                        "VMUL.f32	q5, q1, %q[av]\n"
-                        "VST1.32	{d8-d11}, [%[outptr0]]!\n"
+                        "VADD.f32	q5, q5, q1\n"
+                        "VADD.f32	q6, q6, q2\n"
+                        "VADD.f32	q7, q7, q3\n"
                         ASM_PREFETCH("[%[inptr], #416]")
-                        "VMUL.f32	q6, q2, %q[av]\n"
+                        "VMAX.f32	q4, q4, %q[minv]\n"
+                        "VMAX.f32	q5, q5, %q[minv]\n"
+                        "VMAX.f32	q6, q6, %q[minv]\n"
                         ASM_PREFETCH("[%[inptr], #480]")
-                        "VMUL.f32	q7, q3, %q[av]\n"
+                        "VMAX.f32	q7, q7, %q[minv]\n"
+                        "VMIN.f32	q4, q4, %q[maxv]\n"
+                        "VMIN.f32	q5, q5, %q[maxv]\n"
+                        "VST1.32	{d8-d11}, [%[outptr0]]!\n"
+                        "VMIN.f32	q6, q6, %q[maxv]\n"
+                        "VMIN.f32	q7, q7, %q[maxv]\n"
                         "VST1.32	{d12-d15}, [%[outptr1]]!\n"
 
                         // Rows 2-3
                         "VLD1.32	{d0-d3},   [%[inptr]]!\n"
+                        "VLD1.32	{d8-d11},  [%[outptr2]]\n"
                         "VLD1.32	{d4-d7},   [%[inptr]]!\n"
+                        "VLD1.32	{d12-d15}, [%[outptr3]]\n"
 
-                        "VMUL.f32	q4, q0, %q[av]\n"
+                        "VADD.f32	q4, q4, q0\n"
                         ASM_PREFETCH("[%[outptr0], #96]")
-                        "VMUL.f32	q5, q1, %q[av]\n"
-                        "VST1.32	{d8-d11}, [%[outptr2]]!\n"
+                        "VADD.f32	q5, q5, q1\n"
+                        "VADD.f32	q6, q6, q2\n"
+                        "VADD.f32	q7, q7, q3\n"
                         ASM_PREFETCH("[%[outptr1], #96]")
-                        "VMUL.f32	q6, q2, %q[av]\n"
-                        ASM_PREFETCH("[%[outptr2], #96]")
-                        "VMUL.f32	q7, q3, %q[av]\n"
+                        "VMAX.f32	q4, q4, %q[minv]\n"
+                        "VMAX.f32	q5, q5, %q[minv]\n"
+                        "VMAX.f32	q6, q6, %q[minv]\n"
+                        ASM_PREFETCH("[%[outptr2], #128]")
+                        "VMAX.f32	q7, q7, %q[minv]\n"
+                        "VMIN.f32	q4, q4, %q[maxv]\n"
+                        "VMIN.f32	q5, q5, %q[maxv]\n"
+                        "VST1.32	{d8-d11}, [%[outptr2]]!\n"
+                        "VMIN.f32	q6, q6, %q[maxv]\n"
+                        "VMIN.f32	q7, q7, %q[maxv]\n"
                         "VST1.32	{d12-d15}, [%[outptr3]]!\n"
 
                         // Rows 4-5
                         "VLD1.32	{d0-d3},   [%[inptr]]!\n"
+                        "VLD1.32	{d8-d11},  [%[outptr4]]\n"
                         "VLD1.32	{d4-d7},   [%[inptr]]!\n"
+                        "VLD1.32	{d12-d15}, [%[outptr5]]\n"
 
-                        "VMUL.f32	q4, q0, %q[av]\n"
+                        "VADD.f32	q4, q4, q0\n"
                         ASM_PREFETCH("[%[outptr3], #96]")
-                        "VMUL.f32	q5, q1, %q[av]\n"
-                        "VST1.32	{d8-d11}, [%[outptr4]]!\n"
-                        ASM_PREFETCH("[%[outptr4], #96]")
-                        "VMUL.f32	q6, q2, %q[av]\n"
+                        "VADD.f32	q5, q5, q1\n"
+                        "VADD.f32	q6, q6, q2\n"
+                        "VADD.f32	q7, q7, q3\n"
+                        ASM_PREFETCH("[%[outptr4], #128]")
+                        "VMAX.f32	q4, q4, %q[minv]\n"
+                        "VMAX.f32	q5, q5, %q[minv]\n"
+                        "VMAX.f32	q6, q6, %q[minv]\n"
                         ASM_PREFETCH("[%[outptr5], #128]")
-                        "VMUL.f32	q7, q3, %q[av]\n"
+                        "VMAX.f32	q7, q7, %q[minv]\n"
+                        "VMIN.f32	q4, q4, %q[maxv]\n"
+                        "VMIN.f32	q5, q5, %q[maxv]\n"
+                        "VST1.32	{d8-d11}, [%[outptr4]]!\n"
+                        "VMIN.f32	q6, q6, %q[maxv]\n"
+                        "VMIN.f32	q7, q7, %q[maxv]\n"
                         "VST1.32	{d12-d15}, [%[outptr5]]!\n"
                     : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
                       [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [inptr] "+r" (inptr)
-                    : [av] "w" (av), [bv] "w" (bv)
-                    : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
+                    : [minv] "w" (minv), [maxv] "w" (maxv)
+                    : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "memory"
                     );
                 }
             } else {
-                /* Non-zero beta: Read output and apply beta. */
+                /* Bias mode: Add bias to everything, then min/max/write as before. */
+                const float *biasptr = bias ? bias + i : nullbias;
 
                 /* For ragged X, manually copy over the valid results. */
                 if ((i+7) >= xmax) {
                     for (int xi=0; xi<8; xi++) {
                         if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                            *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                             outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 8]) + (*outptr1 * beta);
+                            *outptr1 = std::min(std::max(minval, inptr[xi + 8] + biasptr[xi]), maxval);
                             outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 16]) + (*outptr2 * beta);
+                            *outptr2 = std::min(std::max(minval, inptr[xi + 16] + biasptr[xi]), maxval);
                             outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 24]) + (*outptr3 * beta);
+                            *outptr3 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                             outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 32]) + (*outptr4 * beta);
+                            *outptr4 = std::min(std::max(minval, inptr[xi + 32] + biasptr[xi]), maxval);
                             outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 40]) + (*outptr5 * beta);
+                            *outptr5 = std::min(std::max(minval, inptr[xi + 40] + biasptr[xi]), maxval);
                             outptr5++;
                         }
                     }
@@ -178,68 +227,75 @@
                     /* Optimized routine to copy an entire block */
                     __asm __volatile (
                         // Rows 0-1
-                        "VLD1.32	{d8-d11},  [%[outptr0]]\n"
-                        "VMUL.f32	q4, q4, %q[bv]\n"
-                        "VLD1.32	{d12-d15}, [%[outptr1]]\n"
-                        "VMUL.f32	q5, q5, %q[bv]\n"
-                        "VLD1.32	{d0-d3},   [%[inptr]]!\n"
-                        "VMUL.f32	q6, q6, %q[bv]\n"
-                        "VLD1.32	{d4-d7},   [%[inptr]]!\n"
-                        "VMUL.f32	q7, q7, %q[bv]\n"
+                        "VLD1.32	{d8-d11},   [%[inptr]]!\n"
+                        "VLD1.32	{d0-d3},   [%[biasptr]]\n"
+                        "VLD1.32	{d12-d15},  [%[inptr]]!\n"
 
-                        "VMLA.f32	q4, q0, %q[av]\n"
+                        "VADD.f32	q4, q4, q0\n"
                         ASM_PREFETCH("[%[inptr], #352]")
-                        "VMLA.f32	q5, q1, %q[av]\n"
-                        "VST1.32	{d8-d11}, [%[outptr0]]!\n"
+                        "VADD.f32	q5, q5, q1\n"
+                        "VADD.f32	q6, q6, q0\n"
+                        "VADD.f32	q7, q7, q1\n"
                         ASM_PREFETCH("[%[inptr], #416]")
-                        "VMLA.f32	q6, q2, %q[av]\n"
+                        "VMAX.f32	q4, q4, %q[minv]\n"
+                        "VMAX.f32	q5, q5, %q[minv]\n"
+                        "VMAX.f32	q6, q6, %q[minv]\n"
                         ASM_PREFETCH("[%[inptr], #480]")
-                        "VMLA.f32	q7, q3, %q[av]\n"
+                        "VMAX.f32	q7, q7, %q[minv]\n"
+                        "VMIN.f32	q4, q4, %q[maxv]\n"
+                        "VMIN.f32	q5, q5, %q[maxv]\n"
+                        "VST1.32	{d8-d11}, [%[outptr0]]!\n"
+                        "VMIN.f32	q6, q6, %q[maxv]\n"
+                        "VMIN.f32	q7, q7, %q[maxv]\n"
                         "VST1.32	{d12-d15}, [%[outptr1]]!\n"
 
                         // Rows 2-3
-                        "VLD1.32	{d8-d11},  [%[outptr2]]\n"
-                        "VMUL.f32	q4, q4, %q[bv]\n"
-                        "VLD1.32	{d12-d15}, [%[outptr3]]\n"
-                        "VMUL.f32	q5, q5, %q[bv]\n"
-                        "VLD1.32	{d0-d3},   [%[inptr]]!\n"
-                        "VMUL.f32	q6, q6, %q[bv]\n"
-                        "VLD1.32	{d4-d7},   [%[inptr]]!\n"
-                        "VMUL.f32	q7, q7, %q[bv]\n"
+                        "VLD1.32	{d8-d11},   [%[inptr]]!\n"
+                        "VLD1.32	{d12-d15},  [%[inptr]]!\n"
 
-                        "VMLA.f32	q4, q0, %q[av]\n"
+                        "VADD.f32	q4, q4, q0\n"
                         ASM_PREFETCH("[%[outptr0], #96]")
-                        "VMLA.f32	q5, q1, %q[av]\n"
-                        "VST1.32	{d8-d11}, [%[outptr2]]!\n"
+                        "VADD.f32	q5, q5, q1\n"
+                        "VADD.f32	q6, q6, q0\n"
+                        "VADD.f32	q7, q7, q1\n"
                         ASM_PREFETCH("[%[outptr1], #96]")
-                        "VMLA.f32	q6, q2, %q[av]\n"
-                        ASM_PREFETCH("[%[outptr2], #96]")
-                        "VMLA.f32	q7, q3, %q[av]\n"
+                        "VMAX.f32	q4, q4, %q[minv]\n"
+                        "VMAX.f32	q5, q5, %q[minv]\n"
+                        "VMAX.f32	q6, q6, %q[minv]\n"
+                        ASM_PREFETCH("[%[outptr2], #128]")
+                        "VMAX.f32	q7, q7, %q[minv]\n"
+                        "VMIN.f32	q4, q4, %q[maxv]\n"
+                        "VMIN.f32	q5, q5, %q[maxv]\n"
+                        "VST1.32	{d8-d11}, [%[outptr2]]!\n"
+                        "VMIN.f32	q6, q6, %q[maxv]\n"
+                        "VMIN.f32	q7, q7, %q[maxv]\n"
                         "VST1.32	{d12-d15}, [%[outptr3]]!\n"
 
                         // Rows 4-5
-                        "VLD1.32	{d8-d11},  [%[outptr4]]\n"
-                        "VMUL.f32	q4, q4, %q[bv]\n"
-                        "VLD1.32	{d12-d15}, [%[outptr5]]\n"
-                        "VMUL.f32	q5, q5, %q[bv]\n"
-                        "VLD1.32	{d0-d3},   [%[inptr]]!\n"
-                        "VMUL.f32	q6, q6, %q[bv]\n"
-                        "VLD1.32	{d4-d7},   [%[inptr]]!\n"
-                        "VMUL.f32	q7, q7, %q[bv]\n"
+                        "VLD1.32	{d8-d11},   [%[inptr]]!\n"
+                        "VLD1.32	{d12-d15},  [%[inptr]]!\n"
 
-                        "VMLA.f32	q4, q0, %q[av]\n"
+                        "VADD.f32	q4, q4, q0\n"
                         ASM_PREFETCH("[%[outptr3], #96]")
-                        "VMLA.f32	q5, q1, %q[av]\n"
-                        "VST1.32	{d8-d11}, [%[outptr4]]!\n"
-                        ASM_PREFETCH("[%[outptr4], #96]")
-                        "VMLA.f32	q6, q2, %q[av]\n"
+                        "VADD.f32	q5, q5, q1\n"
+                        "VADD.f32	q6, q6, q0\n"
+                        "VADD.f32	q7, q7, q1\n"
+                        ASM_PREFETCH("[%[outptr4], #128]")
+                        "VMAX.f32	q4, q4, %q[minv]\n"
+                        "VMAX.f32	q5, q5, %q[minv]\n"
+                        "VMAX.f32	q6, q6, %q[minv]\n"
                         ASM_PREFETCH("[%[outptr5], #128]")
-                        "VMLA.f32	q7, q3, %q[av]\n"
+                        "VMAX.f32	q7, q7, %q[minv]\n"
+                        "VMIN.f32	q4, q4, %q[maxv]\n"
+                        "VMIN.f32	q5, q5, %q[maxv]\n"
+                        "VST1.32	{d8-d11}, [%[outptr4]]!\n"
+                        "VMIN.f32	q6, q6, %q[maxv]\n"
+                        "VMIN.f32	q7, q7, %q[maxv]\n"
                         "VST1.32	{d12-d15}, [%[outptr5]]!\n"
                     : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
                       [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [inptr] "+r" (inptr)
-                    : [av] "w" (av), [bv] "w" (bv)
-                    : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
+                    : [minv] "w" (minv), [maxv] "w" (maxv), [biasptr] "r" (biasptr)
+                    : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "memory"
                     );
                 }
             }
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp
deleted file mode 100644
index 9fca4e3..0000000
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __aarch64__
-
-template<>
-inline void MergeResults<12, 8, false>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta) {
-    const float *inptr = in;
-    prefetch_6x(inptr);
-    prefetch_6x(inptr + 96);
-
-    float32x4_t av = vdupq_n_f32(alpha);
-    float32x4_t bv = vdupq_n_f32(beta);
-
-    for (int y=y0; y<ymax; y+=8) {
-        float *outptr0 = out + (y * ldout) + x0;
-        float *outptr1 = outptr0 + ldout;
-        float *outptr2 = outptr1 + ldout;
-        float *outptr3 = outptr2 + ldout;
-        float *outptr4 = outptr3 + ldout;
-        float *outptr5 = outptr4 + ldout;
-        float *outptr6 = outptr5 + ldout;
-        float *outptr7 = outptr6 + ldout;
-
-        prefetch_2x(outptr0);
-        prefetch_2x(outptr1);
-        prefetch_2x(outptr2);
-        prefetch_2x(outptr3);
-        prefetch_2x(outptr4);
-        prefetch_2x(outptr5);
-        prefetch_2x(outptr6);
-        prefetch_2x(outptr7);
-
-        for (int i=x0; i<xmax; i+=12) {
-            float dummyres[12];
-
-            /* Make sure we throw away results if Y isn't a multiple of 8.
-             * We do this by pointing the result pointer at a dummy buffer
-             * we later discard.  */
-            if ((y+7) >= ymax) {
-                switch ((y + 7) - ymax) {
-                    case 6:
-                        outptr1 = dummyres;
-                        // fall through
-                    case 5:
-                        outptr2 = dummyres;
-                        // fall through
-                    case 4:
-                        outptr3 = dummyres;
-                        // fall through
-                    case 3:
-                        outptr4 = dummyres;
-                        // fall through
-                    case 2:
-                        outptr5 = dummyres;
-                        // fall through
-                    case 1:
-                        outptr6 = dummyres;
-                        // fall through
-                    case 0:
-                        outptr7 = dummyres;
-                        break;
-
-                    default:
-                        UNREACHABLE("Impossible.");
-                }
-            }
-
-            if (beta==0.0f) {
-                /* If beta==0, don't read the original input at all. */
-
-                /* For ragged X, manually copy over the valid results. */
-                if ((i+11) >= xmax) {
-                    for (int xi=0; xi<12; xi++) {
-                        if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]);
-                            outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 12]);
-                            outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 24]);
-                            outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 36]);
-                            outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 48]);
-                            outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 60]);
-                            outptr5++;
-                            *outptr6 = (alpha * inptr[xi + 72]);
-                            outptr6++;
-                            *outptr7 = (alpha * inptr[xi + 84]);
-                            outptr7++;
-                        }
-                    }
-                    inptr += 96;
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-                        // Rows 0-1
-                        "LDP	q0,  q1,  [%[inptr]]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #32]\n"
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #64]\n"
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr0]], #32\n"
-                        ASM_PREFETCH("[%[inptr], #768]")
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr0]], #16\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr1]], #32\n"
-                        ASM_PREFETCH("[%[inptr], #832]")
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr1]], #16\n"
-
-                        // Rows 2-3
-                        "LDP	q0,  q1,  [%[inptr], #96]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #128]\n"
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #160]\n"
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr2]], #32\n"
-                        ASM_PREFETCH("[%[inptr], #896]")
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr2]], #16\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr3]], #32\n"
-                        ASM_PREFETCH("[%[inptr], #1024]")
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr3]], #16\n"
-
-                        // Rows 4-5
-                        "LDP	q0,  q1,  [%[inptr], #192]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #224]\n"
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #256]\n"
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr4]], #32\n"
-                        ASM_PREFETCH("[%[inptr], #960]")
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr4]], #16\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr5]], #32\n"
-                        ASM_PREFETCH("[%[inptr], #1088]")
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr5]], #16\n"
-
-                        // Rows 6-7
-                        "LDP	q0,  q1,  [%[inptr], #288]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #320]\n"
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #352]\n"
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr6]], #32\n"
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr6]], #16\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr7]], #32\n"
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr7]], #16\n"
-                        "ADD	%[inptr], %[inptr], #384\n"
-                    : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
-                      [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                      [inptr] "+r" (inptr)
-                    : [av] "w" (av), [bv] "w" (bv)
-                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
-                    );
-                }
-            } else {
-                /* For ragged X, manually copy over the valid results. */
-                if ((i+11) >= xmax) {
-                    for (int xi=0; xi<12; xi++) {
-                        if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
-                            outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
-                            outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
-                            outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
-                            outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
-                            outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
-                            outptr5++;
-                            *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
-                            outptr6++;
-                            *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
-                            outptr7++;
-                        }
-                    }
-                    inptr += 96;
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-                        // Rows 0-1
-                        "LDP	q16, q17, [%[outptr0]]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDR	q18, [%[outptr0], #32]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDP	q19, q20, [%[outptr1]]\n"
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        "LDR	q21, [%[outptr1], #32]\n"
-                        ASM_PREFETCH("[%[inptr], #768]")
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr]]\n"
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #32]\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #64]\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #832]")
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr0]], #32\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr0]], #16\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #896]")
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr1]], #32\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr1]], #16\n"
-
-                        // Rows 2-3
-                        "LDP	q16, q17, [%[outptr2]]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDR	q18, [%[outptr2], #32]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDP	q19, q20, [%[outptr3]]\n"
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        "LDR	q21, [%[outptr3], #32]\n"
-                        ASM_PREFETCH("[%[inptr], #960]")
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr], #96]\n"
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #128]\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #160]\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #1024]")
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr2]], #32\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr2]], #16\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #1088]")
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr3]], #32\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr3]], #16\n"
-
-                        // Rows 4-5
-                        ASM_PREFETCH("[%[outptr0], #80]")
-                        "LDP	q16, q17, [%[outptr4]]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDR	q18, [%[outptr4], #32]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDP	q19, q20, [%[outptr5]]\n"
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        "LDR	q21, [%[outptr5], #32]\n"
-                        ASM_PREFETCH("[%[outptr1], #80]")
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr], #192]\n"
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #224]\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #256]\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[outptr2], #80]")
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr4]], #32\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr4]], #16\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[outptr3], #80]")
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr5]], #32\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr5]], #16\n"
-
-                        // Rows 6-7
-                        ASM_PREFETCH("[%[outptr4], #80]")
-                        "LDP	q16, q17, [%[outptr6]]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDR	q18, [%[outptr6], #32]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDP	q19, q20, [%[outptr7]]\n"
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        "LDR	q21, [%[outptr7], #32]\n"
-                        ASM_PREFETCH("[%[outptr5], #80]")
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr], #288]\n"
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        "LDP	q2,  q3,  [%[inptr], #320]\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "LDP	q4,  q5,  [%[inptr], #352]\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[outptr6], #128]")
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "STP	q16, q17, [%[outptr6]], #32\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q18, [%[outptr6]], #16\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[outptr7], #128]")
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "STP	q19, q20, [%[outptr7]], #32\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "STR	q21, [%[outptr7]], #16\n"
-                        "ADD	%[inptr], %[inptr], #384\n"
-                    : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
-                      [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                      [inptr] "+r" (inptr)
-                    : [av] "w" (av), [bv] "w" (bv)
-                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
-                    );
-                }
-            }
-        }
-    }
-}
-
-#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp
deleted file mode 100644
index 0e638ee..0000000
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-// This should be possible on any AArch64 target, but some old compilers don't support __fp16 arguments.
-#if defined(__aarch64__) && defined(__ARM_FP16_ARGS)
-
-#include <arm_neon.h>
-
-template<>
-inline void MergeResults<12,8,false>(__fp16 *out, const float *in, int ldout, int y0, int ymax, int x0, int xmax, const __fp16 alpha, const __fp16 beta) {
-    const float *inptr = in;
-    prefetch_6x(inptr);
-    prefetch_6x(inptr + 24);
-
-    float32x4_t av = vdupq_n_f32(alpha);
-    float32x4_t bv = vdupq_n_f32(beta);
-
-    for (int y=y0; y<ymax; y+=8) {
-        __fp16 *outptr0 = out + (y * ldout) + x0;
-        __fp16 *outptr1 = outptr0 + ldout;
-        __fp16 *outptr2 = outptr1 + ldout;
-        __fp16 *outptr3 = outptr2 + ldout;
-        __fp16 *outptr4 = outptr3 + ldout;
-        __fp16 *outptr5 = outptr4 + ldout;
-        __fp16 *outptr6 = outptr5 + ldout;
-        __fp16 *outptr7 = outptr6 + ldout;
-
-        prefetch_2x(outptr0);
-        prefetch_2x(outptr1);
-        prefetch_2x(outptr2);
-        prefetch_2x(outptr3);
-        prefetch_2x(outptr4);
-        prefetch_2x(outptr5);
-        prefetch_2x(outptr6);
-        prefetch_2x(outptr7);
-
-        for (int i=x0; i<xmax; i+=12) {
-            __fp16 dummyres[12];
-
-            /* Make sure we throw away results if Y isn't a multiple of 8.
-             * We do this by pointing the result pointer at a dummy buffer
-             * we later discard.  */
-            if ((y+7) >= ymax) {
-                switch ((y + 7) - ymax) {
-                    case 6:
-                        outptr1 = dummyres;
-                        // fall through
-                    case 5:
-                        outptr2 = dummyres;
-                        // fall through
-                    case 4:
-                        outptr3 = dummyres;
-                        // fall through
-                    case 3:
-                        outptr4 = dummyres;
-                        // fall through
-                    case 2:
-                        outptr5 = dummyres;
-                        // fall through
-                    case 1:
-                        outptr6 = dummyres;
-                        // fall through
-                    case 0:
-                        outptr7 = dummyres;
-                        break;
-
-                    default:
-                        UNREACHABLE("Impossible.");
-                }
-            }
-
-            if (beta == ((__fp16)0.0f)) {
-                /* If beta==0, don't read the output. */
-                /* For ragged X, manually copy over the valid results. */
-                if ((i+11) >= xmax) {
-                    for (int xi=0; xi<12; xi++) {
-                        if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]);
-                            outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 12]);
-                            outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 24]);
-                            outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 36]);
-                            outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 48]);
-                            outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 60]);
-                            outptr5++;
-                            *outptr6 = (alpha * inptr[xi + 72]);
-                            outptr6++;
-                            *outptr7 = (alpha * inptr[xi + 84]);
-                            outptr7++;
-                        }
-                    }
-                    inptr += 96;
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-                        // Rows 0-1
-                        "LDP	q0,  q1,  [%[inptr]]\n"
-                        "LDP	q2,  q3,  [%[inptr], #32]\n"
-                        "LDP	q4,  q5,  [%[inptr], #64]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #768]")
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #832]")
-                        "FCVTN	v16.4h, v16.4s\n"
-                        ASM_PREFETCH("[%[inptr], #896]")
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        ASM_PREFETCH("[%[inptr], #960]")
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr0]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr0]], #8\n"
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr1]], #16\n"
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr1]], #8\n"
-
-                        // Rows 2-3
-                        "LDP	q0,  q1,  [%[inptr], #96]\n"
-                        "LDP	q2,  q3,  [%[inptr], #128]\n"
-                        "LDP	q4,  q5,  [%[inptr], #160]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #1024]")
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[inptr], #1088]")
-                        "FCVTN	v16.4h, v16.4s\n"
-                        ASM_PREFETCH("[%[outptr0], #64]")
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        ASM_PREFETCH("[%[outptr1], #64]")
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr2]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr2]], #8\n"
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr3]], #16\n"
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr3]], #8\n"
-
-                        // Rows 4-5
-                        "LDP	q0,  q1,  [%[inptr], #192]\n"
-                        "LDP	q2,  q3,  [%[inptr], #224]\n"
-                        "LDP	q4,  q5,  [%[inptr], #256]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[outptr2], #64]")
-                        "FCVTN	v16.4h, v16.4s\n"
-                        ASM_PREFETCH("[%[outptr3], #64]")
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        ASM_PREFETCH("[%[outptr4], #88]")
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr4]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr4]], #8\n"
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr5]], #16\n"
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr5]], #8\n"
-
-                        // Rows 6-7
-                        "LDP	q0,  q1,  [%[inptr], #288]\n"
-                        "LDP	q2,  q3,  [%[inptr], #320]\n"
-                        "LDP	q4,  q5,  [%[inptr], #352]\n"
-                        "FMUL	v16.4s, v0.4s, %[av].4s\n"
-                        "FMUL	v17.4s, v1.4s, %[av].4s\n"
-                        ASM_PREFETCH("[%[outptr5], #64]")
-                        "FCVTN	v16.4h, v16.4s\n"
-                        ASM_PREFETCH("[%[outptr6], #88]")
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        ASM_PREFETCH("[%[outptr7], #88]")
-                        "FMUL	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr6]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr6]], #8\n"
-                        "FMUL	v19.4s, v3.4s, %[av].4s\n"
-                        "FMUL	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr7]], #16\n"
-                        "FMUL	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr7]], #8\n"
-                        "ADD	%[inptr], %[inptr], #384\n"
-                    : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
-                      [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                      [inptr] "+r" (inptr)
-                    : [av] "w" (av), [bv] "w" (bv)
-                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
-                    );
-                }
-            } else {
-                /* For ragged X, manually copy over the valid results. */
-                if ((i+11) >= xmax) {
-                    for (int xi=0; xi<12; xi++) {
-                        if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
-                            outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
-                            outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
-                            outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
-                            outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
-                            outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
-                            outptr5++;
-                            *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
-                            outptr6++;
-                            *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
-                            outptr7++;
-                        }
-                    }
-                    inptr += 96;
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-                        // Rows 0-1
-                        "LDR	q16, [%[outptr0]]\n"
-                        "FCVTL2	v17.4s, v16.8h\n"
-                        "LDR	d18, [%[outptr0], #16]\n"
-                        "FCVTL	v16.4s, v16.4h\n"
-                        "LDR	q19, [%[outptr1]]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDR	d21, [%[outptr1], #16]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr]]\n"
-                        "FCVTL	v18.4s, v18.4h\n"
-                        "LDP	q2,  q3,  [%[inptr], #32]\n"
-                        "FCVTL2	v20.4s, v19.8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #64]\n"
-                        "FCVTL	v19.4s, v19.4h\n"
-                        ASM_PREFETCH("[%[inptr], #768]")
-                        "FCVTL	v21.4s, v21.4h\n"
-                        ASM_PREFETCH("[%[inptr], #832]")
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        ASM_PREFETCH("[%[inptr], #896]")
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        ASM_PREFETCH("[%[inptr], #960]")
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "FCVTN	v16.4h, v16.4s\n"
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr0]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr0]], #8\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr1]], #16\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr1]], #8\n"
-
-                        // Rows 2-3
-                        "LDR	q16, [%[outptr2]]\n"
-                        "FCVTL2	v17.4s, v16.8h\n"
-                        "LDR	d18, [%[outptr2], #16]\n"
-                        "FCVTL	v16.4s, v16.4h\n"
-                        "LDR	q19, [%[outptr3]]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDR	d21, [%[outptr3], #16]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr], #96]\n"
-                        "FCVTL	v18.4s, v18.4h\n"
-                        "LDP	q2,  q3,  [%[inptr], #128]\n"
-                        "FCVTL2	v20.4s, v19.8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #160]\n"
-                        "FCVTL	v19.4s, v19.4h\n"
-                        ASM_PREFETCH("[%[inptr], #1024]")
-                        "FCVTL	v21.4s, v21.4h\n"
-                        ASM_PREFETCH("[%[inptr], #1088]")
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        ASM_PREFETCH("[%[outptr0], #64]")
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        ASM_PREFETCH("[%[outptr1], #64]")
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "FCVTN	v16.4h, v16.4s\n"
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr2]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr2]], #8\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr3]], #16\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr3]], #8\n"
-
-                        // Rows 4-5
-                        "LDR	q16, [%[outptr4]]\n"
-                        "FCVTL2	v17.4s, v16.8h\n"
-                        "LDR	d18, [%[outptr4], #16]\n"
-                        "FCVTL	v16.4s, v16.4h\n"
-                        "LDR	q19, [%[outptr5]]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDR	d21, [%[outptr5], #16]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr], #192]\n"
-                        "FCVTL	v18.4s, v18.4h\n"
-                        "LDP	q2,  q3,  [%[inptr], #224]\n"
-                        "FCVTL2	v20.4s, v19.8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #256]\n"
-                        "FCVTL	v19.4s, v19.4h\n"
-                        ASM_PREFETCH("[%[outptr2], #64]")
-                        "FCVTL	v21.4s, v21.4h\n"
-                        ASM_PREFETCH("[%[outptr3], #64]")
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        ASM_PREFETCH("[%[outptr4], #88]")
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "FCVTN	v16.4h, v16.4s\n"
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr4]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr4]], #8\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr5]], #16\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr5]], #8\n"
-
-                        // Rows 6-7
-                        "LDR	q16, [%[outptr6]]\n"
-                        "FCVTL2	v17.4s, v16.8h\n"
-                        "LDR	d18, [%[outptr6], #16]\n"
-                        "FCVTL	v16.4s, v16.4h\n"
-                        "LDR	q19, [%[outptr7]]\n"
-                        "FMUL	v17.4s, v17.4s, %[bv].4s\n"
-                        "LDR	d21, [%[outptr7], #16]\n"
-                        "FMUL	v16.4s, v16.4s, %[bv].4s\n"
-                        "LDP	q0,  q1,  [%[inptr], #288]\n"
-                        "FCVTL	v18.4s, v18.4h\n"
-                        "LDP	q2,  q3,  [%[inptr], #320]\n"
-                        "FCVTL2	v20.4s, v19.8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #352]\n"
-                        "FCVTL	v19.4s, v19.4h\n"
-                        ASM_PREFETCH("[%[outptr5], #64]")
-                        "FCVTL	v21.4s, v21.4h\n"
-                        ASM_PREFETCH("[%[outptr6], #88]")
-                        "FMUL	v18.4s, v18.4s, %[bv].4s\n"
-                        ASM_PREFETCH("[%[outptr7], #88]")
-                        "FMUL	v20.4s, v20.4s, %[bv].4s\n"
-                        "FMUL	v19.4s, v19.4s, %[bv].4s\n"
-                        "FMUL	v21.4s, v21.4s, %[bv].4s\n"
-                        "FMLA	v16.4s, v0.4s, %[av].4s\n"
-                        "FMLA	v17.4s, v1.4s, %[av].4s\n"
-                        "FCVTN	v16.4h, v16.4s\n"
-                        "FCVTN2	v16.8h, v17.4s\n"
-                        "FMLA	v18.4s, v2.4s, %[av].4s\n"
-                        "STR	q16, [%[outptr6]], #16\n"
-                        "FCVTN	v18.4h, v18.4s\n"
-                        "STR	d18, [%[outptr6]], #8\n"
-                        "FMLA	v19.4s, v3.4s, %[av].4s\n"
-                        "FMLA	v20.4s, v4.4s, %[av].4s\n"
-                        "FCVTN	v19.4h, v19.4s\n"
-                        "FCVTN2	v19.8h, v20.4s\n"
-                        "STR	q19, [%[outptr7]], #16\n"
-                        "FMLA	v21.4s, v5.4s, %[av].4s\n"
-                        "FCVTN	v21.4h, v21.4s\n"
-                        "STR	d21, [%[outptr7]], #8\n"
-                        "ADD	%[inptr], %[inptr], #384\n"
-                    : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
-                      [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                      [inptr] "+r" (inptr)
-                    : [av] "w" (av), [bv] "w" (bv)
-                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
-                    );
-                }
-            }
-        }
-    }
-}
-
-#endif // __aarch64__ && __ARM_FP16_ARGS
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
new file mode 100644
index 0000000..7bfab41
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
@@ -0,0 +1,2074 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+
+template<>
+void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const __fp16 *bias, Activation act, bool append)
+{
+    const __fp16 *inptr = in;
+    __fp16 nullbias[24] = { 0 };
+    __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
+    __fp16 maxval =   static_cast<__fp16>(std::numeric_limits<float>::infinity());
+
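+    /* Fold the requested activation into a clamp range: None leaves the
+     * range unbounded, ReLU raises minval to 0, and BoundedReLU also
+     * lowers maxval to act.param1. */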
+    switch(act.type)
+    {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            maxval = static_cast<__fp16>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            minval = 0.0f;
+            break;
+    }
+
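+    /* Zero-filled stand-in bias, used when fresh (non-append) results are
+     * written and the caller did not supply a bias. */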
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (24 * sizeof(__fp16)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
+        __fp16 *outptr0 = out + (y * ldout) + x0;
+        __fp16 *outptr1 = outptr0 + ldout;
+        __fp16 *outptr2 = outptr1 + ldout;
+        __fp16 *outptr3 = outptr2 + ldout;
+        __fp16 *outptr4 = outptr3 + ldout;
+        __fp16 *outptr5 = outptr4 + ldout;
+        __fp16 *outptr6 = outptr5 + ldout;
+        __fp16 *outptr7 = outptr6 + ldout;
+
+        const int height = ymax - y;
+
+        for (int i=x0; i<xmax; i+=24)
+        {
+            if (append)
+            {
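+                /* Append mode: each output element is updated as
+                 *   out[r][c] = clamp(out[r][c] + result[r][c], minval, maxval)
+                 * The switch below dispatches on the number of valid rows
+                 * remaining in this block (height); the default case handles
+                 * a full set of 8 rows. */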
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+23) >= xmax)
+                        {
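+                            /* Ragged X tail: fewer than 24 valid columns remain,
+                             * so handle the valid elements with scalar code. */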
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
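+                            /* Full-width block handled in NEON: each row is three
+                             * q-registers of halves, loaded from the output, added
+                             * to the new results, clamped against v0 (max) / v1 (min)
+                             * and stored back, interleaved with prefetches. */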
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + *outptr2)), maxval);
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 4:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + *outptr2)), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + *outptr3)), maxval);
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q11, [%[outptr3]]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 5:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + *outptr2)), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + *outptr3)), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + *outptr4)), maxval);
+                                    outptr4++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 6:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + *outptr2)), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + *outptr3)), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + *outptr4)), maxval);
+                                    outptr4++;
+                                    *outptr5 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 120] + *outptr5)), maxval);
+                                    outptr5++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "str q17, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 7:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + *outptr2)), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + *outptr3)), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + *outptr4)), maxval);
+                                    outptr4++;
+                                    *outptr5 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 120] + *outptr5)), maxval);
+                                    outptr5++;
+                                    *outptr6 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 144] + *outptr6)), maxval);
+                                    outptr6++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to merge an entire block */
+                            __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                                ".arch  armv8.2-a+fp16\n"
+#endif
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "str q17, [%[outptr5]]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr6]]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + *outptr0)), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + *outptr1)), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + *outptr2)), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + *outptr3)), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + *outptr4)), maxval);
+                                    outptr4++;
+                                    *outptr5 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 120] + *outptr5)), maxval);
+                                    outptr5++;
+                                    *outptr6 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 144] + *outptr6)), maxval);
+                                    outptr6++;
+                                    *outptr7 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 168] + *outptr7)), maxval);
+                                    outptr7++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "fadd v10.8h, v10.8h, v2.8h\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "fmin v10.8h, v10.8h, v0.8h\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "fadd v11.8h, v11.8h, v3.8h\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "fadd v12.8h, v12.8h, v4.8h\n"
+                                "ldr q7, [%[outptr7]]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "fmax v10.8h, v10.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0x150]\n"
+                                "fmin v11.8h, v11.8h, v0.8h\n"
+                                "ldr q8, [%[outptr7], #0x10]\n"
+                                "fmin v12.8h, v12.8h, v0.8h\n"
+                                "str q17, [%[outptr5]]\n"
+                                "fadd v13.8h, v13.8h, v5.8h\n"
+                                "ldr q16, [%[inptr], #0x160]\n"
+                                "fadd v14.8h, v14.8h, v6.8h\n"
+                                "ldr q9, [%[outptr7], #0x20]\n"
+                                "fmax v11.8h, v11.8h, v1.8h\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "fmax v12.8h, v12.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x170]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "fadd v15.8h, v15.8h, v7.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q12, [%[outptr6]]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "fadd v16.8h, v16.8h, v8.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v17.8h, v17.8h, v9.8h\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "str q15, [%[outptr7]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q16, [%[outptr7], #0x10]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "str q17, [%[outptr7], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                }
+            }
+            else
+            {
+                const __fp16 *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
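+                /* Non-append path: the same 24-element bias slice (biasptr, falling
+                 * back to the local nullbias when no bias was supplied) is added to
+                 * every row of the block before clamping to [minval, maxval]. */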
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
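+                /* Cases 2-7 below repeat the same bias-add and clamp sequence, each
+                 * handling one additional output row. */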
+                case 2:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + biasptr[xi])), maxval);
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.8h, v19.8h, v2.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "fadd v20.8h, v20.8h, v3.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fadd v13.8h, v13.8h, v4.8h\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 4:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + biasptr[xi])), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + biasptr[xi])), maxval);
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.8h, v19.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fadd v20.8h, v20.8h, v3.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fadd v13.8h, v13.8h, v4.8h\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "fadd v14.8h, v14.8h, v2.8h\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "fadd v15.8h, v15.8h, v3.8h\n"
+                                "fadd v16.8h, v16.8h, v4.8h\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 5:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + biasptr[xi])), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + biasptr[xi])), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + biasptr[xi])), maxval);
+                                    outptr4++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.8h, v19.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.8h, v20.8h, v3.8h\n"
+                                "fadd v13.8h, v13.8h, v4.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.8h, v14.8h, v2.8h\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fadd v15.8h, v15.8h, v3.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fadd v16.8h, v16.8h, v4.8h\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "fadd v17.8h, v17.8h, v2.8h\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "fadd v18.8h, v18.8h, v3.8h\n"
+                                "fadd v19.8h, v19.8h, v4.8h\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 6:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + biasptr[xi])), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + biasptr[xi])), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + biasptr[xi])), maxval);
+                                    outptr4++;
+                                    *outptr5 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 120] + biasptr[xi])), maxval);
+                                    outptr5++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.8h, v19.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.8h, v20.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "fadd v13.8h, v13.8h, v4.8h\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.8h, v14.8h, v2.8h\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fadd v15.8h, v15.8h, v3.8h\n"
+                                "fadd v16.8h, v16.8h, v4.8h\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fadd v17.8h, v17.8h, v2.8h\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "fadd v18.8h, v18.8h, v3.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fadd v19.8h, v19.8h, v4.8h\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "fadd v20.8h, v20.8h, v2.8h\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "fadd v13.8h, v13.8h, v3.8h\n"
+                                "fadd v14.8h, v14.8h, v4.8h\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q20, [%[outptr5]]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 7:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + biasptr[xi])), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + biasptr[xi])), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + biasptr[xi])), maxval);
+                                    outptr4++;
+                                    *outptr5 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 120] + biasptr[xi])), maxval);
+                                    outptr5++;
+                                    *outptr6 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 144] + biasptr[xi])), maxval);
+                                    outptr6++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.8h, v19.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.8h, v20.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "fadd v13.8h, v13.8h, v4.8h\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.8h, v14.8h, v2.8h\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fadd v15.8h, v15.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v4.8h\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fadd v17.8h, v17.8h, v2.8h\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "fadd v18.8h, v18.8h, v3.8h\n"
+                                "fadd v19.8h, v19.8h, v4.8h\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "fadd v20.8h, v20.8h, v2.8h\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "fadd v13.8h, v13.8h, v3.8h\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "fadd v14.8h, v14.8h, v4.8h\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "fadd v15.8h, v15.8h, v2.8h\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q20, [%[outptr5]]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "fadd v16.8h, v16.8h, v3.8h\n"
+                                "fadd v17.8h, v17.8h, v4.8h\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "str q15, [%[outptr6]]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
+                        if ((i+23) >= xmax)
+                        {
+                            for (int xi=0; xi<23; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi] + biasptr[xi])), maxval);
+                                    outptr0++;
+                                    *outptr1 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 24] + biasptr[xi])), maxval);
+                                    outptr1++;
+                                    *outptr2 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 48] + biasptr[xi])), maxval);
+                                    outptr2++;
+                                    *outptr3 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 72] + biasptr[xi])), maxval);
+                                    outptr3++;
+                                    *outptr4 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 96] + biasptr[xi])), maxval);
+                                    outptr4++;
+                                    *outptr5 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 120] + biasptr[xi])), maxval);
+                                    outptr5++;
+                                    *outptr6 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 144] + biasptr[xi])), maxval);
+                                    outptr6++;
+                                    *outptr7 = std::min(std::max(minval, static_cast<__fp16>(inptr[xi + 168] + biasptr[xi])), maxval);
+                                    outptr7++;
+                                }
+                            }
+                            inptr += 192;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "dup v0.8h, %[maxval].h[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.8h, %[minval].h[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "fadd v13.8h, v13.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.8h, v14.8h, v3.8h\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.8h, v15.8h, v4.8h\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v2.8h\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.8h, v17.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.8h, v18.8h, v4.8h\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.8h, v19.8h, v2.8h\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.8h, v20.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "fadd v13.8h, v13.8h, v4.8h\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.8h, v14.8h, v2.8h\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fadd v15.8h, v15.8h, v3.8h\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "fadd v16.8h, v16.8h, v4.8h\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fadd v17.8h, v17.8h, v2.8h\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "fadd v18.8h, v18.8h, v3.8h\n"
+                                "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "fadd v19.8h, v19.8h, v4.8h\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "fadd v20.8h, v20.8h, v2.8h\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "fadd v13.8h, v13.8h, v3.8h\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "fmin v13.8h, v13.8h, v0.8h\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "fadd v14.8h, v14.8h, v4.8h\n"
+                                "ldr q18, [%[inptr], #0x150]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "fmax v13.8h, v13.8h, v1.8h\n"
+                                "fmin v14.8h, v14.8h, v0.8h\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "fadd v15.8h, v15.8h, v2.8h\n"
+                                "ldr q19, [%[inptr], #0x160]\n"
+                                "fadd v16.8h, v16.8h, v3.8h\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v14.8h, v14.8h, v1.8h\n"
+                                "str q20, [%[outptr5]]\n"
+                                "fmin v15.8h, v15.8h, v0.8h\n"
+                                "ldr q20, [%[inptr], #0x170]\n"
+                                "fmin v16.8h, v16.8h, v0.8h\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v17.8h, v17.8h, v4.8h\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "fmax v15.8h, v15.8h, v1.8h\n"
+                                "fmax v16.8h, v16.8h, v1.8h\n"
+                                "fadd v18.8h, v18.8h, v2.8h\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "fmin v17.8h, v17.8h, v0.8h\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "fadd v19.8h, v19.8h, v3.8h\n"
+                                "str q15, [%[outptr6]]\n"
+                                "fmin v18.8h, v18.8h, v0.8h\n"
+                                "fmax v17.8h, v17.8h, v1.8h\n"
+                                "fadd v20.8h, v20.8h, v4.8h\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "fmin v19.8h, v19.8h, v0.8h\n"
+                                "fmax v18.8h, v18.8h, v1.8h\n"
+                                "fmin v20.8h, v20.8h, v0.8h\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "fmax v19.8h, v19.8h, v1.8h\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "fmax v20.8h, v20.8h, v1.8h\n"
+                                "str q18, [%[outptr7]]\n"
+                                "str q19, [%[outptr7], #0x10]\n"
+                                "str q20, [%[outptr7], #0x20]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+        }
+    }
+}
+
+#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
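[Reviewer note, not part of the patch] For readability, the following is a minimal scalar sketch of what each of the unrolled merge blocks above computes: in append mode the partial result already in the output is accumulated, otherwise a per-column bias (zero if none is supplied) is added, and the sum is then clamped to the [minval, maxval] range derived from the Activation. The names merge_row_ref and Act are illustrative assumptions only; the real code is the templated MergeResults<> specialisations shown in this diff.

    // Minimal sketch (assumption, not from the patch): scalar reference for the
    // fused bias + activation merge. Hypothetical names: merge_row_ref, Act.
    #include <algorithm>
    #include <limits>

    struct Act {
        enum class Type { None, ReLU, BoundedReLU } type = Type::None;
        float param1 = 0.0f; // upper bound used by BoundedReLU
    };

    // Merge one row of 'width' accumulator values into 'out'.
    inline void merge_row_ref(float *out, const float *in, const float *bias,
                              int width, Act act, bool append)
    {
        float minval = -std::numeric_limits<float>::infinity();
        float maxval =  std::numeric_limits<float>::infinity();

        switch (act.type) {
            default:
            case Act::Type::None:
                break;
            case Act::Type::BoundedReLU:
                maxval = act.param1;
                /* fall through */
            case Act::Type::ReLU:
                minval = 0.0f;
                break;
        }

        for (int x = 0; x < width; x++) {
            // Append mode: accumulate into the existing output.
            // Otherwise: add the (possibly absent, i.e. zero) bias.
            float v = append ? (in[x] + out[x]) : (in[x] + (bias ? bias[x] : 0.0f));
            out[x] = std::min(std::max(minval, v), maxval);
        }
    }

The assembly blocks in this diff are unrolled, vectorised versions of this loop (fadd for the addition, fmin/fmax against the broadcast maxval/minval registers for the clamp), with separate ragged-edge fallbacks for partial blocks.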
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_12x8.hpp
index fcdca59..0c5aa7e 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_12x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_12x8.hpp
@@ -26,11 +26,33 @@
 #ifdef __aarch64__
 
 template<>
-inline void MergeResults<12, 8, false>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
+void MergeResults<12, 8, false>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float *bias, Activation act, bool append)
 {
     const float *inptr = in;
+    float nullbias[12] = { 0 };
+    float minval = - std::numeric_limits<float>::infinity();
+    float maxval =   std::numeric_limits<float>::infinity();
 
-    for (int y=y0; y<ymax; y+=8) {
+    switch(act.type)
+    {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            minval = 0.0f;
+            break;
+    }
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (12 * sizeof(float)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
         float *outptr0 = out + (y * ldout) + x0;
         float *outptr1 = outptr0 + ldout;
         float *outptr2 = outptr1 + ldout;
@@ -42,19 +64,21 @@
 
         const int height = ymax - y;
 
-        for (int i=x0; i<xmax; i+=12) {
-            if (beta==0.0f)
+        for (int i=x0; i<xmax; i+=12)
+        {
+            if (append)
             {
-                switch(height) {
+                switch(height)
+                {
                 case 1:
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
                                 }
                             }
@@ -62,23 +86,34 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x10]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr0], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "add %[inptr], %[inptr], #0x180\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -88,13 +123,13 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
                                 }
                             }
@@ -102,35 +137,55 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x10]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x10]\n"
-                                "ldr q7, [%[inptr], #0x40]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -140,15 +195,15 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + *outptr2), maxval);
                                     outptr2++;
                                 }
                             }
@@ -156,47 +211,76 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q7, [%[inptr], #0x10]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "str q11, [%[outptr0], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x40]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr1], #0x10]\n"
-                                "ldr q5, [%[inptr], #0x70]\n"
-                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr2], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "ldr q7, [%[inptr], #0x50]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x20]\n"
-                                "ldr q4, [%[inptr], #0x80]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x20]\n"
-                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -206,17 +290,17 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + *outptr2), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + *outptr3), maxval);
                                     outptr3++;
                                 }
                             }
@@ -224,58 +308,96 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q4, [%[inptr], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x10]\n"
-                                "ldr q5, [%[inptr], #0x40]\n"
-                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x70]\n"
-                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x10]\n"
-                                "ldr q7, [%[inptr], #0xa0]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "ldr q6, [%[inptr], #0x80]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
                                 "str q10, [%[outptr2], #0x20]\n"
-                                "ldr q7, [%[inptr], #0xb0]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x20]\n"
-                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q11, [%[outptr3]]\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -285,19 +407,19 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + *outptr2), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + *outptr3), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + *outptr4), maxval);
                                     outptr4++;
                                 }
                             }
@@ -305,70 +427,117 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q5, [%[inptr], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr0], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x40]\n"
-                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr1], #0x10]\n"
-                                "ldr q7, [%[inptr], #0x70]\n"
-                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr2], #0x10]\n"
-                                "ldr q4, [%[inptr], #0xa0]\n"
-                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr3], #0x10]\n"
-                                "ldr q5, [%[inptr], #0xd0]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr4], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "ldr q7, [%[inptr], #0x50]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x20]\n"
-                                "ldr q4, [%[inptr], #0x80]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x20]\n"
-                                "ldr q5, [%[inptr], #0xb0]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr3], #0x20]\n"
-                                "ldr q6, [%[inptr], #0xe0]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
                                 "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr4], #0x20]\n"
-                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -378,21 +547,21 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<12; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + *outptr2), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + *outptr3), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + *outptr4), maxval);
                                     outptr4++;
-                                    *outptr5 = (alpha * inptr[xi + 60]);
+                                    *outptr5 = std::min(std::max(minval, inptr[xi + 60] + *outptr5), maxval);
                                     outptr5++;
                                 }
                             }
@@ -400,82 +569,138 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q5, [%[inptr], #0xf0]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5]]\n"
-                                "ldr q6, [%[inptr], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x10]\n"
-                                "ldr q7, [%[inptr], #0x40]\n"
-                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x70]\n"
-                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x10]\n"
-                                "ldr q5, [%[inptr], #0xa0]\n"
-                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr3], #0x10]\n"
-                                "ldr q6, [%[inptr], #0xd0]\n"
-                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr4], #0x10]\n"
-                                "ldr q7, [%[inptr], #0x100]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr5], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "ldr q6, [%[inptr], #0x80]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
                                 "str q10, [%[outptr2], #0x20]\n"
-                                "ldr q7, [%[inptr], #0xb0]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x20]\n"
-                                "ldr q4, [%[inptr], #0xe0]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "str q17, [%[outptr5]]\n"
                                 "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4], #0x20]\n"
-                                "ldr q5, [%[inptr], #0x110]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
                                 "add %[outptr4], %[outptr4], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "str q11, [%[outptr5], #0x20]\n"
                                 "add %[outptr5], %[outptr5], #0x30\n"
                                 "add %[inptr], %[inptr], #0x180\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -485,23 +710,23 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<12; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + *outptr2), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + *outptr3), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + *outptr4), maxval);
                                     outptr4++;
-                                    *outptr5 = (alpha * inptr[xi + 60]);
+                                    *outptr5 = std::min(std::max(minval, inptr[xi + 60] + *outptr5), maxval);
                                     outptr5++;
-                                    *outptr6 = (alpha * inptr[xi + 72]);
+                                    *outptr6 = std::min(std::max(minval, inptr[xi + 72] + *outptr6), maxval);
                                     outptr6++;
                                 }
                             }
@@ -509,94 +734,159 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q5, [%[inptr], #0xf0]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5]]\n"
-                                "ldr q6, [%[inptr], #0x120]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6]]\n"
-                                "ldr q7, [%[inptr], #0x10]\n"
-                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "str q11, [%[outptr0], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x40]\n"
-                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr1], #0x10]\n"
-                                "ldr q5, [%[inptr], #0x70]\n"
-                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr2], #0x10]\n"
-                                "ldr q6, [%[inptr], #0xa0]\n"
-                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr3], #0x10]\n"
-                                "ldr q7, [%[inptr], #0xd0]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr4], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x100]\n"
-                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr5], #0x10]\n"
-                                "ldr q5, [%[inptr], #0x130]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr6], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "ldr q7, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x20]\n"
-                                "ldr q4, [%[inptr], #0x80]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x20]\n"
-                                "ldr q5, [%[inptr], #0xb0]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr3], #0x20]\n"
-                                "ldr q6, [%[inptr], #0xe0]\n"
-                                "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr4], #0x20]\n"
-                                "ldr q7, [%[inptr], #0x110]\n"
-                                "add %[outptr4], %[outptr4], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
                                 "str q11, [%[outptr5], #0x20]\n"
-                                "ldr q4, [%[inptr], #0x140]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr6]]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
                                 "add %[outptr5], %[outptr5], #0x30\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr6], #0x20]\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
                                 "add %[outptr6], %[outptr6], #0x30\n"
                                 "add %[inptr], %[inptr], #0x180\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -607,25 +897,25 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<12; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + *outptr0), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + *outptr1), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + *outptr2), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + *outptr3), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + *outptr4), maxval);
                                     outptr4++;
-                                    *outptr5 = (alpha * inptr[xi + 60]);
+                                    *outptr5 = std::min(std::max(minval, inptr[xi + 60] + *outptr5), maxval);
                                     outptr5++;
-                                    *outptr6 = (alpha * inptr[xi + 72]);
+                                    *outptr6 = std::min(std::max(minval, inptr[xi + 72] + *outptr6), maxval);
                                     outptr6++;
-                                    *outptr7 = (alpha * inptr[xi + 84]);
+                                    *outptr7 = std::min(std::max(minval, inptr[xi + 84] + *outptr7), maxval);
                                     outptr7++;
                                 }
                             }
@@ -633,105 +923,179 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q4, [%[inptr]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[outptr0]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q5, [%[inptr], #0xf0]\n"
-                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5]]\n"
-                                "ldr q6, [%[inptr], #0x120]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6]]\n"
-                                "ldr q7, [%[inptr], #0x150]\n"
-                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr7]]\n"
-                                "ldr q4, [%[inptr], #0x10]\n"
-                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x10]\n"
-                                "ldr q5, [%[inptr], #0x40]\n"
-                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x70]\n"
-                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x10]\n"
-                                "ldr q7, [%[inptr], #0xa0]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x10]\n"
-                                "ldr q4, [%[inptr], #0xd0]\n"
-                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4], #0x10]\n"
-                                "ldr q5, [%[inptr], #0x100]\n"
-                                "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5], #0x10]\n"
-                                "ldr q6, [%[inptr], #0x130]\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6], #0x10]\n"
-                                "ldr q7, [%[inptr], #0x160]\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr7], #0x10]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "ldr q6, [%[inptr], #0x80]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
                                 "str q10, [%[outptr2], #0x20]\n"
-                                "ldr q7, [%[inptr], #0xb0]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "fadd v10.4s, v10.4s, v2.4s\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "fmin v10.4s, v10.4s, v0.4s\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "fadd v11.4s, v11.4s, v3.4s\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "fadd v12.4s, v12.4s, v4.4s\n"
+                                "ldr q7, [%[outptr7]]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "fmax v10.4s, v10.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0x150]\n"
+                                "fmin v11.4s, v11.4s, v0.4s\n"
+                                "ldr q8, [%[outptr7], #0x10]\n"
+                                "fmin v12.4s, v12.4s, v0.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "fadd v13.4s, v13.4s, v5.4s\n"
+                                "ldr q16, [%[inptr], #0x160]\n"
+                                "fadd v14.4s, v14.4s, v6.4s\n"
+                                "ldr q9, [%[outptr7], #0x20]\n"
+                                "fmax v11.4s, v11.4s, v1.4s\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "fmax v12.4s, v12.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x170]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "fadd v15.4s, v15.4s, v7.4s\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q12, [%[outptr6]]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "fadd v16.4s, v16.4s, v8.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "fadd v17.4s, v17.4s, v9.4s\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x20]\n"
-                                "ldr q4, [%[inptr], #0xe0]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "str q15, [%[outptr7]]\n"
                                 "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4], #0x20]\n"
-                                "ldr q5, [%[inptr], #0x110]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "str q16, [%[outptr7], #0x10]\n"
                                 "add %[outptr4], %[outptr4], #0x30\n"
-                                "fmul v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5], #0x20]\n"
-                                "ldr q6, [%[inptr], #0x140]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "str q17, [%[outptr7], #0x20]\n"
                                 "add %[outptr5], %[outptr5], #0x30\n"
-                                "fmul v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6], #0x20]\n"
-                                "ldr q7, [%[inptr], #0x170]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
                                 "add %[outptr6], %[outptr6], #0x30\n"
-                                "fmul v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr7], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
                                 "add %[outptr7], %[outptr7], #0x30\n"
                                 "add %[inptr], %[inptr], #0x180\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -742,16 +1106,23 @@
             }
             else
             {
-                switch(height) {
+                const float *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
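+                /* Each valid output element below is computed as out = clamp(in + bias, minval, maxval);
+                 * if no bias tensor is supplied, biasptr falls back to the zero-filled nullbias buffer. */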
+                switch(height)
+                {
                 case 1:
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
                                 }
                             }
@@ -759,29 +1130,34 @@
                         } else {
                             /* Optimized routine to copy an entire block */
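+                            /* The block routine broadcasts maxval/minval into v0/v1, loads the twelve
+                             * bias values into v2-v4, then applies fadd (bias), fmin (maxval) and
+                             * fmax (minval) to each 12-wide row before storing it. */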
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr0], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x10]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr0], #0x10]\n"
-                                "ldr q10, [%[outptr0], #0x20]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -791,13 +1167,13 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
                                 }
                             }
@@ -805,47 +1181,52 @@
                         } else {
                             /* Optimized routine to copy an entire block */
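+                            /* Rows are packed 12 floats (0x30 bytes) apart in the input block, so the
+                             * second row's vectors are loaded from [%[inptr], #0x30] onwards. */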
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr0], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x10]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x10]\n"
-                                "ldr q11, [%[outptr1], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x40]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x10]\n"
-                                "ldr q8, [%[outptr0], #0x20]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q9, [%[outptr1], #0x20]\n"
-                                "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -855,15 +1236,15 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                                     outptr2++;
                                 }
                             }
@@ -871,65 +1252,70 @@
                         } else {
                             /* Optimized routine to copy an entire block */
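+                            /* For taller blocks the loads, bias adds and clamps are interleaved across
+                             * rows, and result registers (e.g. v13) are reused for later input vectors
+                             * once their contents have been stored. */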
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr2]]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q11, [%[outptr0], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x10]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr0], #0x10]\n"
-                                "ldr q8, [%[outptr1], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x40]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr1], #0x10]\n"
-                                "ldr q9, [%[outptr2], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x70]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr2], #0x10]\n"
-                                "ldr q10, [%[outptr0], #0x20]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "ldr q11, [%[outptr1], #0x20]\n"
-                                "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x50]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x20]\n"
-                                "ldr q8, [%[outptr2], #0x20]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x80]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x20]\n"
-                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.4s, v19.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "fadd v20.4s, v20.4s, v3.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v4.4s\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -939,17 +1325,17 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + biasptr[xi]), maxval);
                                     outptr3++;
                                 }
                             }
@@ -957,82 +1343,87 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr2]]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q11, [%[outptr3]]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q8, [%[outptr0], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x10]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x10]\n"
-                                "ldr q9, [%[outptr1], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x40]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x10]\n"
-                                "ldr q10, [%[outptr2], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x70]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x10]\n"
-                                "ldr q11, [%[outptr3], #0x10]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0xa0]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x10]\n"
-                                "ldr q8, [%[outptr0], #0x20]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q9, [%[outptr1], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.4s, v19.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fadd v20.4s, v20.4s, v3.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "ldr q10, [%[outptr2], #0x20]\n"
-                                "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x80]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x20]\n"
-                                "ldr q11, [%[outptr3], #0x20]\n"
-                                "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0xb0]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x20]\n"
-                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v4.4s\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "fadd v14.4s, v14.4s, v2.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "fadd v15.4s, v15.4s, v3.4s\n"
+                                "fadd v16.4s, v16.4s, v4.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -1042,19 +1433,19 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + biasptr[xi]), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + biasptr[xi]), maxval);
                                     outptr4++;
                                 }
                             }
@@ -1062,100 +1453,105 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr2]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q11, [%[outptr3]]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q8, [%[outptr4]]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q9, [%[outptr0], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x10]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr0], #0x10]\n"
-                                "ldr q10, [%[outptr1], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x40]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr1], #0x10]\n"
-                                "ldr q11, [%[outptr2], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x70]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr2], #0x10]\n"
-                                "ldr q8, [%[outptr3], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xa0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr3], #0x10]\n"
-                                "ldr q9, [%[outptr4], #0x10]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xd0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr4], #0x10]\n"
-                                "ldr q10, [%[outptr0], #0x20]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "ldr q11, [%[outptr1], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.4s, v19.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x50]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x20]\n"
-                                "ldr q8, [%[outptr2], #0x20]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.4s, v20.4s, v3.4s\n"
+                                "fadd v13.4s, v13.4s, v4.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.4s, v14.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fadd v15.4s, v15.4s, v3.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x80]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x20]\n"
-                                "ldr q9, [%[outptr3], #0x20]\n"
-                                "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xb0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr3], #0x20]\n"
-                                "ldr q10, [%[outptr4], #0x20]\n"
-                                "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0xe0]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr4], #0x20]\n"
-                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fadd v16.4s, v16.4s, v4.4s\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "fadd v17.4s, v17.4s, v2.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "fadd v18.4s, v18.4s, v3.4s\n"
+                                "fadd v19.4s, v19.4s, v4.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -1165,21 +1561,21 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + biasptr[xi]), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + biasptr[xi]), maxval);
                                     outptr4++;
-                                    *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
+                                    *outptr5 = std::min(std::max(minval, inptr[xi + 60] + biasptr[xi]), maxval);
                                     outptr5++;
                                 }
                             }
@@ -1187,118 +1583,123 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr2]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q11, [%[outptr3]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q8, [%[outptr4]]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q9, [%[outptr5]]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xf0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5]]\n"
-                                "ldr q10, [%[outptr0], #0x10]\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x10]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x10]\n"
-                                "ldr q11, [%[outptr1], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x40]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x10]\n"
-                                "ldr q8, [%[outptr2], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x70]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x10]\n"
-                                "ldr q9, [%[outptr3], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xa0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr3], #0x10]\n"
-                                "ldr q10, [%[outptr4], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0xd0]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr4], #0x10]\n"
-                                "ldr q11, [%[outptr5], #0x10]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x100]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr5], #0x10]\n"
-                                "ldr q8, [%[outptr0], #0x20]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q9, [%[outptr1], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.4s, v19.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "ldr q10, [%[outptr2], #0x20]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.4s, v20.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "fadd v13.4s, v13.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.4s, v14.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x80]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x20]\n"
-                                "ldr q11, [%[outptr3], #0x20]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fadd v15.4s, v15.4s, v3.4s\n"
+                                "fadd v16.4s, v16.4s, v4.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fadd v17.4s, v17.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "fadd v18.4s, v18.4s, v3.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0xb0]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x20]\n"
-                                "ldr q8, [%[outptr4], #0x20]\n"
-                                "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xe0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4], #0x20]\n"
-                                "ldr q9, [%[outptr5], #0x20]\n"
-                                "add %[outptr4], %[outptr4], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x110]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5], #0x20]\n"
-                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fadd v19.4s, v19.4s, v4.4s\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "fadd v20.4s, v20.4s, v2.4s\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "fadd v13.4s, v13.4s, v3.4s\n"
+                                "fadd v14.4s, v14.4s, v4.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q20, [%[outptr5]]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
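
As a reading aid for the rewritten blocks above and below: each one now computes out = clamp(in + bias, minval, maxval) per element and only writes the destination, where the previous code read the destination back to apply alpha/beta. A rough single-row NEON-intrinsics equivalent of the vector path is sketched here; the function name, parameters and the multiple-of-four width assumption are invented for illustration and do not appear in the patch.

#include <arm_neon.h>

/* Sketch only: out[x] = clamp(acc[x] + bias[x], minval, maxval) for one row.
 * Assumes 'width' is a multiple of 4; the real kernels fall back to scalar
 * loops for the ragged right-hand edge. */
static inline void merge_row_bias_act(float *out, const float *acc, const float *bias,
                                      int width, float minval, float maxval)
{
    const float32x4_t vmin = vdupq_n_f32(minval);
    const float32x4_t vmax = vdupq_n_f32(maxval);

    for (int x = 0; x < width; x += 4)
    {
        float32x4_t v = vld1q_f32(acc + x);        /* accumulator block from the GEMM */
        v = vaddq_f32(v, vld1q_f32(bias + x));     /* fused bias addition             */
        v = vminq_f32(v, vmax);                    /* clamp above at maxval           */
        v = vmaxq_f32(v, vmin);                    /* clamp below at minval           */
        vst1q_f32(out + x, v);                     /* write-only: 'out' is never read */
    }
}

The hand-written assembly performs the same fadd/fmin/fmax/str sequence but interleaves loads, prefetches and stores across up to eight rows to hide memory latency.
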
@@ -1308,23 +1709,23 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + biasptr[xi]), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + biasptr[xi]), maxval);
                                     outptr4++;
-                                    *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
+                                    *outptr5 = std::min(std::max(minval, inptr[xi + 60] + biasptr[xi]), maxval);
                                     outptr5++;
-                                    *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
+                                    *outptr6 = std::min(std::max(minval, inptr[xi + 72] + biasptr[xi]), maxval);
                                     outptr6++;
                                 }
                             }
@@ -1332,136 +1733,141 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr2]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q11, [%[outptr3]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q8, [%[outptr4]]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q9, [%[outptr5]]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xf0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5]]\n"
-                                "ldr q10, [%[outptr6]]\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x120]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6]]\n"
-                                "ldr q11, [%[outptr0], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x10]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr0], #0x10]\n"
-                                "ldr q8, [%[outptr1], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x40]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr1], #0x10]\n"
-                                "ldr q9, [%[outptr2], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x70]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr2], #0x10]\n"
-                                "ldr q10, [%[outptr3], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0xa0]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr3], #0x10]\n"
-                                "ldr q11, [%[outptr4], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0xd0]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr4], #0x10]\n"
-                                "ldr q8, [%[outptr5], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x100]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr5], #0x10]\n"
-                                "ldr q9, [%[outptr6], #0x10]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x130]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr6], #0x10]\n"
-                                "ldr q10, [%[outptr0], #0x20]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x20]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr0], #0x20]\n"
-                                "ldr q11, [%[outptr1], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.4s, v19.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x50]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr1], #0x20]\n"
-                                "ldr q8, [%[outptr2], #0x20]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.4s, v20.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "fadd v13.4s, v13.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.4s, v14.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x80]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr2], #0x20]\n"
-                                "ldr q9, [%[outptr3], #0x20]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fadd v15.4s, v15.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v4.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fadd v17.4s, v17.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xb0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr3], #0x20]\n"
-                                "ldr q10, [%[outptr4], #0x20]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "fadd v18.4s, v18.4s, v3.4s\n"
+                                "fadd v19.4s, v19.4s, v4.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "fadd v20.4s, v20.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "fadd v13.4s, v13.4s, v3.4s\n"
                                 "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0xe0]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr4], #0x20]\n"
-                                "ldr q11, [%[outptr5], #0x20]\n"
-                                "add %[outptr4], %[outptr4], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x110]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr5], #0x20]\n"
-                                "ldr q8, [%[outptr6], #0x20]\n"
-                                "add %[outptr5], %[outptr5], #0x30\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x140]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr6], #0x20]\n"
-                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "fadd v14.4s, v14.4s, v4.4s\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "fadd v15.4s, v15.4s, v2.4s\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q20, [%[outptr5]]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "fadd v16.4s, v16.4s, v3.4s\n"
+                                "fadd v17.4s, v17.4s, v4.4s\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "str q15, [%[outptr6]]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
@@ -1472,25 +1878,25 @@
                     {
                         if ((i+11) >= xmax)
                         {
-                            for (int xi=0; xi<12; xi++)
+                            for (int xi=0; xi<11; xi++)
                             {
                                 if ((i+xi) < xmax)
                                 {
-                                    *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+                                    *outptr0 = std::min(std::max(minval, inptr[xi] + biasptr[xi]), maxval);
                                     outptr0++;
-                                    *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+                                    *outptr1 = std::min(std::max(minval, inptr[xi + 12] + biasptr[xi]), maxval);
                                     outptr1++;
-                                    *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+                                    *outptr2 = std::min(std::max(minval, inptr[xi + 24] + biasptr[xi]), maxval);
                                     outptr2++;
-                                    *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+                                    *outptr3 = std::min(std::max(minval, inptr[xi + 36] + biasptr[xi]), maxval);
                                     outptr3++;
-                                    *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+                                    *outptr4 = std::min(std::max(minval, inptr[xi + 48] + biasptr[xi]), maxval);
                                     outptr4++;
-                                    *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
+                                    *outptr5 = std::min(std::max(minval, inptr[xi + 60] + biasptr[xi]), maxval);
                                     outptr5++;
-                                    *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
+                                    *outptr6 = std::min(std::max(minval, inptr[xi + 72] + biasptr[xi]), maxval);
                                     outptr6++;
-                                    *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
+                                    *outptr7 = std::min(std::max(minval, inptr[xi + 84] + biasptr[xi]), maxval);
                                     outptr7++;
                                 }
                             }
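
The scalar loops in the hunks above run only when the guard (i+11) >= xmax fires, i.e. when fewer than a full 12-column block remains; at most 11 columns can then still satisfy (i+xi) < xmax, so the xi < 11 bound covers every valid element. A minimal single-row sketch of that tail, with invented names and with minval/maxval as passed to the assembly:

#include <algorithm>

/* Sketch only: ragged-X tail for one output row. At most 11 of the 12 columns
 * of the final block can be in range, and each valid one gets the same
 * add-bias-then-clamp as the vector path. The real code advances eight output
 * pointers in lock step. */
static void merge_tail_bias_act(float *out_row, const float *in_row, const float *bias,
                                int i, int xmax, float minval, float maxval)
{
    for (int xi = 0; xi < 11; xi++)
    {
        if ((i + xi) < xmax)
        {
            out_row[xi] = std::min(std::max(minval, in_row[xi] + bias[xi]), maxval);
        }
    }
}
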
@@ -1498,153 +1904,158 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-                                "ldr q8, [%[outptr0]]\n"
+                                "dup v0.4s, %[maxval].s[0]\n"
+                                "ldr q2, [%[biasptr]]\n"
+                                "dup v1.4s, %[minval].s[0]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr]]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0]]\n"
-                                "ldr q9, [%[outptr1]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x30]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1]]\n"
-                                "ldr q10, [%[outptr2]]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x60]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2]]\n"
-                                "ldr q11, [%[outptr3]]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x90]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3]]\n"
-                                "ldr q8, [%[outptr4]]\n"
-                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xc0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4]]\n"
-                                "ldr q9, [%[outptr5]]\n"
-                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0xf0]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5]]\n"
-                                "ldr q10, [%[outptr6]]\n"
+                                "fadd v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "fadd v14.4s, v14.4s, v3.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "fadd v15.4s, v15.4s, v4.4s\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v2.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
                                 "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x120]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6]]\n"
-                                "ldr q11, [%[outptr7]]\n"
-                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x150]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr7]]\n"
-                                "ldr q8, [%[outptr0], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x10]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x10]\n"
-                                "ldr q9, [%[outptr1], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x40]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x10]\n"
-                                "ldr q10, [%[outptr2], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x70]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x10]\n"
-                                "ldr q11, [%[outptr3], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0xa0]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x10]\n"
-                                "ldr q8, [%[outptr4], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xd0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4], #0x10]\n"
-                                "ldr q9, [%[outptr5], #0x10]\n"
-                                "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x100]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5], #0x10]\n"
-                                "ldr q10, [%[outptr6], #0x10]\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x130]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6], #0x10]\n"
-                                "ldr q11, [%[outptr7], #0x10]\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x160]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr7], #0x10]\n"
-                                "ldr q8, [%[outptr0], #0x20]\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0x20]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr0], #0x20]\n"
-                                "ldr q9, [%[outptr1], #0x20]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "fadd v17.4s, v17.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "fadd v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "fadd v19.4s, v19.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
                                 "add %[outptr0], %[outptr0], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x50]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr1], #0x20]\n"
-                                "ldr q10, [%[outptr2], #0x20]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "fadd v20.4s, v20.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "fadd v13.4s, v13.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "fadd v14.4s, v14.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
                                 "add %[outptr1], %[outptr1], #0x30\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x80]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr2], #0x20]\n"
-                                "ldr q11, [%[outptr3], #0x20]\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q19, [%[outptr2]]\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "fadd v15.4s, v15.4s, v3.4s\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "fadd v16.4s, v16.4s, v4.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "fadd v17.4s, v17.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
                                 "add %[outptr2], %[outptr2], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0xb0]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr3], #0x20]\n"
-                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
+                                "str q14, [%[outptr3]]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "fadd v18.4s, v18.4s, v3.4s\n"
+                                "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "fadd v19.4s, v19.4s, v4.4s\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "fadd v20.4s, v20.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "fadd v13.4s, v13.4s, v3.4s\n"
                                 "add %[outptr3], %[outptr3], #0x30\n"
-                                "fmul v8.4s, v8.4s, %[beta].s[0]\n"
-                                "ldr q4, [%[inptr], #0xe0]\n"
-                                "fmla v8.4s, v4.4s, %[alpha].s[0]\n"
-                                "str q8, [%[outptr4], #0x20]\n"
-                                "ldr q9, [%[outptr5], #0x20]\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "str q17, [%[outptr4]]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "fmin v13.4s, v13.4s, v0.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "fadd v14.4s, v14.4s, v4.4s\n"
+                                "ldr q18, [%[inptr], #0x150]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "fmax v13.4s, v13.4s, v1.4s\n"
+                                "fmin v14.4s, v14.4s, v0.4s\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "fadd v15.4s, v15.4s, v2.4s\n"
+                                "ldr q19, [%[inptr], #0x160]\n"
+                                "fadd v16.4s, v16.4s, v3.4s\n"
                                 "add %[outptr4], %[outptr4], #0x30\n"
-                                "fmul v9.4s, v9.4s, %[beta].s[0]\n"
-                                "ldr q5, [%[inptr], #0x110]\n"
-                                "fmla v9.4s, v5.4s, %[alpha].s[0]\n"
-                                "str q9, [%[outptr5], #0x20]\n"
-                                "ldr q10, [%[outptr6], #0x20]\n"
-                                "add %[outptr5], %[outptr5], #0x30\n"
-                                "fmul v10.4s, v10.4s, %[beta].s[0]\n"
-                                "ldr q6, [%[inptr], #0x140]\n"
-                                "fmla v10.4s, v6.4s, %[alpha].s[0]\n"
-                                "str q10, [%[outptr6], #0x20]\n"
-                                "ldr q11, [%[outptr7], #0x20]\n"
-                                "add %[outptr6], %[outptr6], #0x30\n"
-                                "fmul v11.4s, v11.4s, %[beta].s[0]\n"
-                                "ldr q7, [%[inptr], #0x170]\n"
-                                "fmla v11.4s, v7.4s, %[alpha].s[0]\n"
-                                "str q11, [%[outptr7], #0x20]\n"
-                                "add %[outptr7], %[outptr7], #0x30\n"
+                                "fmax v14.4s, v14.4s, v1.4s\n"
+                                "str q20, [%[outptr5]]\n"
+                                "fmin v15.4s, v15.4s, v0.4s\n"
+                                "ldr q20, [%[inptr], #0x170]\n"
+                                "fmin v16.4s, v16.4s, v0.4s\n"
                                 "add %[inptr], %[inptr], #0x180\n"
+                                "fadd v17.4s, v17.4s, v4.4s\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "fmax v15.4s, v15.4s, v1.4s\n"
+                                "fmax v16.4s, v16.4s, v1.4s\n"
+                                "fadd v18.4s, v18.4s, v2.4s\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "fmin v17.4s, v17.4s, v0.4s\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "fadd v19.4s, v19.4s, v3.4s\n"
+                                "str q15, [%[outptr6]]\n"
+                                "fmin v18.4s, v18.4s, v0.4s\n"
+                                "fmax v17.4s, v17.4s, v1.4s\n"
+                                "fadd v20.4s, v20.4s, v4.4s\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "fmin v19.4s, v19.4s, v0.4s\n"
+                                "fmax v18.4s, v18.4s, v1.4s\n"
+                                "fmin v20.4s, v20.4s, v0.4s\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "fmax v19.4s, v19.4s, v1.4s\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "fmax v20.4s, v20.4s, v1.4s\n"
+                                "str q18, [%[outptr7]]\n"
+                                "str q19, [%[outptr7], #0x10]\n"
+                                "str q20, [%[outptr7], #0x20]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
                             : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                               [inptr] "+r" (inptr)
-                            : [alpha] "w" (alpha), [beta] "w" (beta)
-                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "memory"
+                            : [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
                             );
                         }
                     }
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp
deleted file mode 100644
index 60cc2f3..0000000
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-// AArch64 only, and either the FP16_KERNELS option set or the target explicitly supports FP16 vectors.
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
-
-template<>
-inline void MergeResults<24, 8>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax,
-                         const int x0, const int xmax, const __fp16 alpha, const __fp16 beta) {
-    const __fp16 *inptr = in;
-    prefetch_6x(inptr);
-    prefetch_6x(inptr + 48);
-
-    float16x8_t va = vdupq_n_f16(alpha);
-    float16x8_t vb = vdupq_n_f16(beta);
-
-    for (int y=y0; y<ymax; y+=8) {
-        __fp16 *outptr0 = out + (y * ldout) + x0;
-        __fp16 *outptr1 = outptr0 + ldout;
-        __fp16 *outptr2 = outptr1 + ldout;
-        __fp16 *outptr3 = outptr2 + ldout;
-        __fp16 *outptr4 = outptr3 + ldout;
-        __fp16 *outptr5 = outptr4 + ldout;
-        __fp16 *outptr6 = outptr5 + ldout;
-        __fp16 *outptr7 = outptr6 + ldout;
-
-        prefetch_2x(outptr0);
-        prefetch_2x(outptr1);
-        prefetch_2x(outptr2);
-        prefetch_2x(outptr3);
-        prefetch_2x(outptr4);
-        prefetch_2x(outptr5);
-        prefetch_2x(outptr6);
-        prefetch_2x(outptr7);
-
-        for (int i=x0; i<xmax; i+=24) {
-            __fp16 dummyres[24];
-
-            /* Make sure we throw away results if Y isn't a multiple of 8.
-             * We do this by pointing the result pointer at a dummy buffer
-             * we later discard.  */
-            if ((y+7) >= ymax) {
-                switch ((y + 7) - ymax) {
-                    case 6:
-                        outptr1 = dummyres;
-                        // fall through
-                    case 5:
-                        outptr2 = dummyres;
-                        // fall through
-                    case 4:
-                        outptr3 = dummyres;
-                        // fall through
-                    case 3:
-                        outptr4 = dummyres;
-                        // fall through
-                    case 2:
-                        outptr5 = dummyres;
-                        // fall through
-                    case 1:
-                        outptr6 = dummyres;
-                        // fall through
-                    case 0:
-                        outptr7 = dummyres;
-                        break;
-
-                    default:
-                        UNREACHABLE("Impossible.");
-
-                }
-            }
-
-            if (beta == (__fp16)0.0f) {
-                /* If beta==0, don't read the output. */
-
-                /* For ragged X, manually copy over the valid results. */
-                if ((i+23) >= xmax) {
-                    for (int xi=0; xi<24; xi++) {
-                        if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]);
-                            outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 24]);
-                            outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 48]);
-                            outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 72]);
-                            outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 96]);
-                            outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 120]);
-                            outptr5++;
-                            *outptr6 = (alpha * inptr[xi + 144]);
-                            outptr6++;
-                            *outptr7 = (alpha * inptr[xi + 168]);
-                            outptr7++;
-                        }
-                    }
-                    inptr += 192;
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                        ".arch	armv8.2-a+fp16\n"
-#endif
-                        // Rows 0-1
-                        ASM_PREFETCH("[%[inptr], #768]")
-                        "LDP	q0,  q1,  [%[inptr]]\n"
-                        "LDP	q2,  q3,  [%[inptr], #32]\n"
-                        "LDP	q4,  q5,  [%[inptr], #64]\n"
-                        "FMUL	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[inptr], #832]")
-                        "FMUL	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr0]], #32\n"
-                        "FMUL	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr0]], #16\n"
-                        "FMUL	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[inptr], #896]")
-                        "FMUL	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr1]], #32\n"
-                        "FMUL	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr1]], #16\n"
-                        ASM_PREFETCH("[%[inptr], #960]")
-
-                        // Rows 2-3
-                        ASM_PREFETCH("[%[inptr], #1024]")
-                        "LDP	q0,  q1,  [%[inptr], #96]\n"
-                        "LDP	q2,  q3,  [%[inptr], #128]\n"
-                        "LDP	q4,  q5,  [%[inptr], #160]\n"
-                        "FMUL	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[inptr], #1088]")
-                        "FMUL	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr2]], #32\n"
-                        "FMUL	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr2]], #16\n"
-                        "FMUL	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr0], #80]")
-                        "FMUL	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr3]], #32\n"
-                        "FMUL	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr3]], #16\n"
-                        ASM_PREFETCH("[%[outptr1], #80]")
-
-                        // Rows 4-5
-                        ASM_PREFETCH("[%[outptr2], #80]")
-                        "LDP	q0,  q1,  [%[inptr], #192]\n"
-                        "LDP	q2,  q3,  [%[inptr], #224]\n"
-                        "LDP	q4,  q5,  [%[inptr], #256]\n"
-                        "FMUL	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr3], #80]")
-                        "FMUL	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr4]], #32\n"
-                        "FMUL	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr4]], #16\n"
-                        "FMUL	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr4], #80]")
-                        "FMUL	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr5]], #32\n"
-                        "FMUL	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr5]], #16\n"
-
-                        // Rows 6-7
-                        ASM_PREFETCH("[%[outptr5], #80]")
-                        "LDP	q0,  q1,  [%[inptr], #288]\n"
-                        "LDP	q2,  q3,  [%[inptr], #320]\n"
-                        "LDP	q4,  q5,  [%[inptr], #352]\n"
-                        "FMUL	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr6], #128]")
-                        "FMUL	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr6]], #32\n"
-                        "FMUL	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr6]], #16\n"
-                        "FMUL	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr7], #128]")
-                        "FMUL	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr7]], #32\n"
-                        "FMUL	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr7]], #16\n"
-                        "ADD	%[inptr], %[inptr], #384\n"
-                    : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
-                      [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                      [inptr] "+r" (inptr)
-                    : [va] "w" (va), [vb] "w" (vb)
-                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
-                    );
-                }
-            } else {
-                /* For ragged X, manually copy over the valid results. */
-                if ((i+23) >= xmax) {
-                    for (int xi=0; xi<24; xi++) {
-                        if ((i+xi) < xmax) {
-                            *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
-                            outptr0++;
-                            *outptr1 = (alpha * inptr[xi + 24]) + (*outptr1 * beta);
-                            outptr1++;
-                            *outptr2 = (alpha * inptr[xi + 48]) + (*outptr2 * beta);
-                            outptr2++;
-                            *outptr3 = (alpha * inptr[xi + 72]) + (*outptr3 * beta);
-                            outptr3++;
-                            *outptr4 = (alpha * inptr[xi + 96]) + (*outptr4 * beta);
-                            outptr4++;
-                            *outptr5 = (alpha * inptr[xi + 120]) + (*outptr5 * beta);
-                            outptr5++;
-                            *outptr6 = (alpha * inptr[xi + 144]) + (*outptr6 * beta);
-                            outptr6++;
-                            *outptr7 = (alpha * inptr[xi + 168]) + (*outptr7 * beta);
-                            outptr7++;
-                        }
-                    }
-                    inptr += 192;
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                        ".arch	armv8.2-a+fp16\n"
-#endif
-                        // Rows 0-1
-                        "LDP	q16, q17, [%[outptr0]]\n"
-                        "FMUL	v16.8h, v16.8h, %[vb].8h\n"
-                        "LDR	q18, [%[outptr0], #32]\n"
-                        "FMUL	v17.8h, v17.8h, %[vb].8h\n"
-                        "LDP	q19, q20, [%[outptr1]]\n"
-                        "FMUL	v18.8h, v18.8h, %[vb].8h\n"
-                        ASM_PREFETCH("[%[inptr], #768]")
-                        "LDR	q21, [%[outptr1], #32]\n"
-                        "FMUL	v19.8h, v19.8h, %[vb].8h\n"
-                        "LDP	q0,  q1,  [%[inptr]]\n"
-                        "FMUL	v20.8h, v20.8h, %[vb].8h\n"
-                        "LDP	q2,  q3,  [%[inptr], #32]\n"
-                        "FMUL	v21.8h, v21.8h, %[vb].8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #64]\n"
-                        "FMLA	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[inptr], #832]")
-                        "FMLA	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr0]], #32\n"
-                        "FMLA	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr0]], #16\n"
-                        "FMLA	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[inptr], #896]")
-                        "FMLA	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr1]], #32\n"
-                        "FMLA	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr1]], #16\n"
-                        ASM_PREFETCH("[%[inptr], #960]")
-
-                        // Rows 2-3
-                        "LDP	q16, q17, [%[outptr2]]\n"
-                        "FMUL	v16.8h, v16.8h, %[vb].8h\n"
-                        "LDR	q18, [%[outptr2], #32]\n"
-                        "FMUL	v17.8h, v17.8h, %[vb].8h\n"
-                        "LDP	q19, q20, [%[outptr3]]\n"
-                        "FMUL	v18.8h, v18.8h, %[vb].8h\n"
-                        ASM_PREFETCH("[%[inptr], #1024]")
-                        "LDR	q21, [%[outptr3], #32]\n"
-                        "FMUL	v19.8h, v19.8h, %[vb].8h\n"
-                        "LDP	q0,  q1,  [%[inptr], #96]\n"
-                        "FMUL	v20.8h, v20.8h, %[vb].8h\n"
-                        "LDP	q2,  q3,  [%[inptr], #128]\n"
-                        "FMUL	v21.8h, v21.8h, %[vb].8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #160]\n"
-                        "FMLA	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[inptr], #1088]")
-                        "FMLA	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr2]], #32\n"
-                        "FMLA	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr2]], #16\n"
-                        "FMLA	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr0], #80]")
-                        "FMLA	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr3]], #32\n"
-                        "FMLA	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr3]], #16\n"
-                        ASM_PREFETCH("[%[outptr1], #80]")
-
-                        // Rows 4-5
-                        "LDP	q16, q17, [%[outptr4]]\n"
-                        "FMUL	v16.8h, v16.8h, %[vb].8h\n"
-                        "LDR	q18, [%[outptr4], #32]\n"
-                        "FMUL	v17.8h, v17.8h, %[vb].8h\n"
-                        "LDP	q19, q20, [%[outptr5]]\n"
-                        "FMUL	v18.8h, v18.8h, %[vb].8h\n"
-                        ASM_PREFETCH("[%[outptr2], #80]")
-                        "LDR	q21, [%[outptr5], #32]\n"
-                        "FMUL	v19.8h, v19.8h, %[vb].8h\n"
-                        "LDP	q0,  q1,  [%[inptr], #192]\n"
-                        "FMUL	v20.8h, v20.8h, %[vb].8h\n"
-                        "LDP	q2,  q3,  [%[inptr], #224]\n"
-                        "FMUL	v21.8h, v21.8h, %[vb].8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #256]\n"
-                        "FMLA	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr3], #80]")
-                        "FMLA	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr4]], #32\n"
-                        "FMLA	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr4]], #16\n"
-                        "FMLA	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr4], #80]")
-                        "FMLA	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr5]], #32\n"
-                        "FMLA	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr5]], #16\n"
-
-                        // Rows 6-7
-                        "LDP	q16, q17, [%[outptr6]]\n"
-                        "FMUL	v16.8h, v16.8h, %[vb].8h\n"
-                        "LDR	q18, [%[outptr6], #32]\n"
-                        "FMUL	v17.8h, v17.8h, %[vb].8h\n"
-                        "LDP	q19, q20, [%[outptr7]]\n"
-                        ASM_PREFETCH("[%[outptr5], #80]")
-                        "FMUL	v18.8h, v18.8h, %[vb].8h\n"
-                        "LDR	q21, [%[outptr7], #32]\n"
-                        "FMUL	v19.8h, v19.8h, %[vb].8h\n"
-                        "LDP	q0,  q1,  [%[inptr], #288]\n"
-                        "FMUL	v20.8h, v20.8h, %[vb].8h\n"
-                        "LDP	q2,  q3,  [%[inptr], #320]\n"
-                        "FMUL	v21.8h, v21.8h, %[vb].8h\n"
-                        "LDP	q4,  q5,  [%[inptr], #352]\n"
-                        "FMLA	v16.8h, v0.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr6], #128]")
-                        "FMLA	v17.8h, v1.8h, %[va].8h\n"
-                        "STP	q16, q17, [%[outptr6]], #32\n"
-                        "FMLA	v18.8h, v2.8h, %[va].8h\n"
-                        "STR	q18, [%[outptr6]], #16\n"
-                        "FMLA	v19.8h, v3.8h, %[va].8h\n"
-                        ASM_PREFETCH("[%[outptr7], #128]")
-                        "FMLA	v20.8h, v4.8h, %[va].8h\n"
-                        "STP	q19, q20, [%[outptr7]], #32\n"
-                        "FMLA	v21.8h, v5.8h, %[va].8h\n"
-                        "STR	q21, [%[outptr7]], #16\n"
-                        "ADD	%[inptr], %[inptr], #384\n"
-                    : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
-                      [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                      [inptr] "+r" (inptr)
-                    : [va] "w" (va), [vb] "w" (vb)
-                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
-                    );
-                }
-            }
-        }
-    }
-}
-
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
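Note on the routines removed above: stripped of the prefetching and NEON detail, the old merge computed out = alpha * in + beta * out for each tile, redirecting rows past ymax to a throw-away buffer and falling back to a scalar loop for ragged columns. A minimal scalar sketch of that behaviour follows (illustrative only; merge_alpha_beta_tile is a made-up name, not library code):

    // Scalar equivalent of the alpha/beta merge deleted above (simplified sketch of one tile).
    // Rows beyond ymax are redirected to a scratch buffer so their results are discarded,
    // mirroring the "dummyres" trick used in the assembly paths.
    template <typename T, int HEIGHT, int WIDTH>
    void merge_alpha_beta_tile(T *out, const T *in, int ldout,
                               int y, int ymax, int x, int xmax,
                               T alpha, T beta)
    {
        T dummy[WIDTH] = {}; // write-only sink for out-of-range rows
        for (int r = 0; r < HEIGHT; r++) {
            T       *outrow = ((y + r) < ymax) ? (out + (y + r) * ldout + x) : dummy;
            const T *inrow  = in + r * WIDTH;
            for (int c = 0; c < WIDTH && (x + c) < xmax; c++) {
                if (beta == T(0)) {
                    outrow[c] = alpha * inrow[c];                    // beta==0: never read the output
                } else {
                    outrow[c] = alpha * inrow[c] + beta * outrow[c]; // general alpha/beta merge
                }
            }
        }
    }
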
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
deleted file mode 100644
index 410a0a1..0000000
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __aarch64__
-
-template<>
-inline void MergeResults<12, 8, false>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t alpha, const int32_t beta) {
-    UNUSED(alpha);
-    const int32_t *inptr = in;
-    prefetch_6x(inptr);
-    prefetch_6x(inptr + 96);
-
-    for (int y=y0; y<ymax; y+=8) {
-        int32_t *outptr0 = out + (y * ldout) + x0;
-        int32_t *outptr1 = outptr0 + ldout;
-        int32_t *outptr2 = outptr1 + ldout;
-        int32_t *outptr3 = outptr2 + ldout;
-        int32_t *outptr4 = outptr3 + ldout;
-        int32_t *outptr5 = outptr4 + ldout;
-        int32_t *outptr6 = outptr5 + ldout;
-        int32_t *outptr7 = outptr6 + ldout;
-
-        prefetch_2x(outptr0);
-        prefetch_2x(outptr1);
-        prefetch_2x(outptr2);
-        prefetch_2x(outptr3);
-        prefetch_2x(outptr4);
-        prefetch_2x(outptr5);
-        prefetch_2x(outptr6);
-        prefetch_2x(outptr7);
-
-        for (int i=x0; i<xmax; i+=12) {
-            int32_t dummyres[12];
-
-            /* Make sure we throw away results if Y isn't a multiple of 8.
-             * We do this by pointing the result pointer at a dummy buffer
-             * we later discard.  */
-            if ((y+7) >= ymax) {
-                switch ((y + 7) - ymax) {
-                    case 6:
-                        outptr1 = dummyres;
-                        // fall through
-                    case 5:
-                        outptr2 = dummyres;
-                        // fall through
-                    case 4:
-                        outptr3 = dummyres;
-                        // fall through
-                    case 3:
-                        outptr4 = dummyres;
-                        // fall through
-                    case 2:
-                        outptr5 = dummyres;
-                        // fall through
-                    case 1:
-                        outptr6 = dummyres;
-                        // fall through
-                    case 0:
-                        outptr7 = dummyres;
-                        break;
-
-                    default:
-                        UNREACHABLE("Impossible.");
-                }
-            }
-
-            /* For ragged X, manually copy over the valid results. */
-            if ((i+11) >= xmax) {
-                for (int xi=0; xi<12; xi++) {
-                    if ((i+xi) < xmax) {
-                        *outptr0 = (inptr[xi]) + (*outptr0 * beta);
-                        outptr0++;
-                        *outptr1 = (inptr[xi + 12]) + (*outptr1 * beta);
-                        outptr1++;
-                        *outptr2 = (inptr[xi + 24]) + (*outptr2 * beta);
-                        outptr2++;
-                        *outptr3 = (inptr[xi + 36]) + (*outptr3 * beta);
-                        outptr3++;
-                        *outptr4 = (inptr[xi + 48]) + (*outptr4 * beta);
-                        outptr4++;
-                        *outptr5 = (inptr[xi + 60]) + (*outptr5 * beta);
-                        outptr5++;
-                        *outptr6 = (inptr[xi + 72]) + (*outptr6 * beta);
-                        outptr6++;
-                        *outptr7 = (inptr[xi + 84]) + (*outptr7 * beta);
-                        outptr7++;
-                    }
-                }
-                inptr += 96;
-            } else {
-                if (beta == 0u) {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-                        // Row 0
-                        ASM_PREFETCH("[%x[outptr1], #192]")
-                        "ldr q0, [%x[inptr]]\n"
-                        "ldr q1, [%x[inptr], #0x10]\n"
-                        "ldr q2, [%x[inptr], #0x20]\n"
-
-                        // Row 1
-                        ASM_PREFETCH("[%x[outptr2], #192]")
-                        "ldr q3, [%x[inptr], #0x30]\n"
-                        "str q0, [%x[outptr0]], #0x10\n"
-                        "ldr q4, [%x[inptr], #0x40]\n"
-                        "str q1, [%x[outptr0]], #0x10\n"
-                        "ldr q5, [%x[inptr], #0x50]\n"
-                        "str q2, [%x[outptr0]], #0x10\n"
-
-                        // Row 2
-                        ASM_PREFETCH("[%x[outptr3], #192]")
-                        "ldr q0, [%x[inptr], #0x60]\n"
-                        "str q3, [%x[outptr1]], #0x10\n"
-                        "ldr q1, [%x[inptr], #0x70]\n"
-                        "str q4, [%x[outptr1]], #0x10\n"
-                        "ldr q2, [%x[inptr], #0x80]\n"
-                        "str q5, [%x[outptr1]], #0x10\n"
-
-                        // Row 3
-                        ASM_PREFETCH("[%x[outptr4], #192]")
-                        "ldr q3, [%x[inptr], #0x90]\n"
-                        "str q0, [%x[outptr2]], #0x10\n"
-                        "ldr q4, [%x[inptr], #0xa0]\n"
-                        "str q1, [%x[outptr2]], #0x10\n"
-                        "ldr q5, [%x[inptr], #0xb0]\n"
-                        "str q2, [%x[outptr2]], #0x10\n"
-
-                        // Row 4
-                        ASM_PREFETCH("[%x[outptr5], #192]")
-                        "ldr q0, [%x[inptr], #0xc0]\n"
-                        "str q3, [%x[outptr3]], #0x10\n"
-                        "ldr q1, [%x[inptr], #0xd0]\n"
-                        "str q4, [%x[outptr3]], #0x10\n"
-                        "ldr q2, [%x[inptr], #0xe0]\n"
-                        "str q5, [%x[outptr3]], #0x10\n"
-
-                        // Row 5
-                        ASM_PREFETCH("[%x[outptr6], #192]")
-                        "ldr q3, [%x[inptr], #0xf0]\n"
-                        "str q0, [%x[outptr4]], #0x10\n"
-                        "ldr q4, [%x[inptr], #0x100]\n"
-                        "str q1, [%x[outptr4]], #0x10\n"
-                        "ldr q5, [%x[inptr], #0x110]\n"
-                        "str q2, [%x[outptr4]], #0x10\n"
-
-                        // Row 6
-                        ASM_PREFETCH("[%x[outptr7], #192]")
-                        "ldr q0, [%x[inptr], #0x120]\n"
-                        "str q3, [%x[outptr5]], #0x10\n"
-                        "ldr q1, [%x[inptr], #0x130]\n"
-                        "str q4, [%x[outptr5]], #0x10\n"
-                        "ldr q2, [%x[inptr], #0x140]\n"
-                        "str q5, [%x[outptr5]], #0x10\n"
-
-                        // Row 7
-                        "ldr q3, [%x[inptr], #0x150]\n"
-                        "str q0, [%x[outptr6]], #0x10\n"
-                        "ldr q4, [%x[inptr], #0x160]\n"
-                        "str q1, [%x[outptr6]], #0x10\n"
-                        "ldr q5, [%x[inptr], #0x170]\n"
-                        "str q2, [%x[outptr6]], #0x10\n"
-                        "str q3, [%x[outptr7]], #0x10\n"
-                        "str q4, [%x[outptr7]], #0x10\n"
-                        "str q5, [%x[outptr7]], #0x10\n"
-
-                        "add %x[inptr], %x[inptr], #0x180\n"
-                        : [outptr0] "+r" (outptr0),
-                          [outptr1] "+r" (outptr1),
-                          [outptr2] "+r" (outptr2),
-                          [outptr3] "+r" (outptr3),
-                          [outptr4] "+r" (outptr4),
-                          [outptr5] "+r" (outptr5),
-                          [outptr6] "+r" (outptr6),
-                          [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr)
-                        :
-                        : "v0", "v1", "v2", "v3", "v4", "v5", "v6"
-                    );
-                } else {
-                    /* Optimized routine to copy an entire block */
-                    __asm __volatile (
-                        // Row 0
-                        ASM_PREFETCH("[%x[outptr1], #192]")
-                        "ldr q3, [%x[outptr0]]\n"
-                        "ldr q4, [%x[outptr0], #0x10]\n"
-                        "ldr q5, [%x[outptr0], #0x20]\n"
-                        "ldr q6, [%x[inptr]]\n"
-                        "ldr q7, [%x[inptr], #0x10]\n"
-                        "ldr q8, [%x[inptr], #0x20]\n"
-                        "add v3.4s, v3.4s, v6.4s\n"
-                        "ldr q0, [%x[outptr1]]\n"
-                        "add v4.4s, v4.4s, v7.4s\n"
-                        "ldr q1, [%x[outptr1], #0x10]\n"
-                        "add v5.4s, v5.4s, v8.4s\n"
-                        "ldr q2, [%x[outptr1], #0x20]\n"
-
-                        // Row 1
-                        ASM_PREFETCH("[%x[outptr2], #192]")
-                        "ldr q6, [%x[inptr], #0x30]\n"
-                        "str q3, [%x[outptr0]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0x40]\n"
-                        "str q4, [%x[outptr0]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0x50]\n"
-                        "str q5, [%x[outptr0]], #0x10\n"
-                        "add v0.4s, v0.4s, v6.4s\n"
-                        "ldr q3, [%x[outptr2]]\n"
-                        "add v1.4s, v1.4s, v7.4s\n"
-                        "ldr q4, [%x[outptr2], #0x10]\n"
-                        "add v2.4s, v2.4s, v8.4s\n"
-                        "ldr q5, [%x[outptr2], #0x20]\n"
-
-                        // Row 2
-                        ASM_PREFETCH("[%x[outptr3], #192]")
-                        "ldr q6, [%x[inptr], #0x60]\n"
-                        "str q0, [%x[outptr1]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0x70]\n"
-                        "str q1, [%x[outptr1]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0x80]\n"
-                        "str q2, [%x[outptr1]], #0x10\n"
-                        "add v3.4s, v3.4s, v6.4s\n"
-                        "ldr q0, [%x[outptr3]]\n"
-                        "add v4.4s, v4.4s, v7.4s\n"
-                        "ldr q1, [%x[outptr3], #0x10]\n"
-                        "add v5.4s, v5.4s, v8.4s\n"
-                        "ldr q2, [%x[outptr3], #0x20]\n"
-
-                        // Row 3
-                        ASM_PREFETCH("[%x[outptr4], #192]")
-                        "ldr q6, [%x[inptr], #0x90]\n"
-                        "str q3, [%x[outptr2]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0xa0]\n"
-                        "str q4, [%x[outptr2]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0xb0]\n"
-                        "str q5, [%x[outptr2]], #0x10\n"
-                        "add v0.4s, v0.4s, v6.4s\n"
-                        "ldr q3, [%x[outptr4]]\n"
-                        "add v1.4s, v1.4s, v7.4s\n"
-                        "ldr q4, [%x[outptr4], #0x10]\n"
-                        "add v2.4s, v2.4s, v8.4s\n"
-                        "ldr q5, [%x[outptr4], #0x20]\n"
-
-                        // Row 4
-                        ASM_PREFETCH("[%x[outptr5], #192]")
-                        "ldr q6, [%x[inptr], #0xc0]\n"
-                        "str q0, [%x[outptr3]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0xd0]\n"
-                        "str q1, [%x[outptr3]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0xe0]\n"
-                        "str q2, [%x[outptr3]], #0x10\n"
-                        "add v3.4s, v3.4s, v6.4s\n"
-                        "ldr q0, [%x[outptr5]]\n"
-                        "add v4.4s, v4.4s, v7.4s\n"
-                        "ldr q1, [%x[outptr5], #0x10]\n"
-                        "add v5.4s, v5.4s, v8.4s\n"
-                        "ldr q2, [%x[outptr5], #0x20]\n"
-
-                        // Row 5
-                        ASM_PREFETCH("[%x[outptr6], #192]")
-                        "ldr q6, [%x[inptr], #0xf0]\n"
-                        "str q3, [%x[outptr4]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0x100]\n"
-                        "str q4, [%x[outptr4]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0x110]\n"
-                        "str q5, [%x[outptr4]], #0x10\n"
-                        "add v0.4s, v0.4s, v6.4s\n"
-                        "ldr q3, [%x[outptr6]]\n"
-                        "add v1.4s, v1.4s, v7.4s\n"
-                        "ldr q4, [%x[outptr6], #0x10]\n"
-                        "add v2.4s, v2.4s, v8.4s\n"
-                        "ldr q5, [%x[outptr6], #0x20]\n"
-
-                        // Row 6
-                        ASM_PREFETCH("[%x[outptr7], #192]")
-                        "ldr q6, [%x[inptr], #0x120]\n"
-                        "str q0, [%x[outptr5]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0x130]\n"
-                        "str q1, [%x[outptr5]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0x140]\n"
-                        "str q2, [%x[outptr5]], #0x10\n"
-                        "add v3.4s, v3.4s, v6.4s\n"
-                        "ldr q0, [%x[outptr7]]\n"
-                        "add v4.4s, v4.4s, v7.4s\n"
-                        "ldr q1, [%x[outptr7], #0x10]\n"
-                        "add v5.4s, v5.4s, v8.4s\n"
-                        "ldr q2, [%x[outptr7], #0x20]\n"
-
-                        // Row 7
-                        "ldr q6, [%x[inptr], #0x150]\n"
-                        "str q3, [%x[outptr6]], #0x10\n"
-                        "ldr q7, [%x[inptr], #0x160]\n"
-                        "str q4, [%x[outptr6]], #0x10\n"
-                        "ldr q8, [%x[inptr], #0x170]\n"
-                        "str q5, [%x[outptr6]], #0x10\n"
-                        "add v0.4s, v0.4s, v6.4s\n"
-                        "add v1.4s, v1.4s, v7.4s\n"
-                        "add v2.4s, v2.4s, v8.4s\n"
-                        "str q0, [%x[outptr7]], #0x10\n"
-                        "str q1, [%x[outptr7]], #0x10\n"
-                        "str q2, [%x[outptr7]], #0x10\n"
-
-                        "add %x[inptr], %x[inptr], #0x180\n"
-                        : [outptr0] "+r" (outptr0),
-                          [outptr1] "+r" (outptr1),
-                          [outptr2] "+r" (outptr2),
-                          [outptr3] "+r" (outptr3),
-                          [outptr4] "+r" (outptr4),
-                          [outptr5] "+r" (outptr5),
-                          [outptr6] "+r" (outptr6),
-                          [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr)
-                        :
-                        : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"
-                    );
-
-                }
-            }
-        }
-    }
-}
-
-template<>
-inline void MergeResults<12, 8>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t alpha, const uint32_t beta) {
-  // Since the above code uses only MUL and MLA instructions, discard the "unsignedness" and proceed safely.
-  MergeResults<12, 8>(reinterpret_cast<int32_t*>(out), reinterpret_cast<const int32_t*>(in), ldout, y0, ymax, x0, xmax, static_cast<const int32_t>(alpha), static_cast<const int32_t>(beta));
-}
-
-#endif // __aarch64__
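The replacement file below implements the same 12x8 int32 merge against the new interface (bias pointer, Activation, append flag) with per-height specialised assembly; activation is a no-op for int32 (UNUSED(act)), the append path accumulates the packed results into the existing output, and a zeroed nullbias stands in when no bias pointer is supplied. A rough scalar sketch of one output row under those assumptions (the non-append behaviour is inferred from the setup code, and merge_s32_row is an illustrative name, not library API):

    #include <cstdint>

    // Sketch of the per-row behaviour of the new int32 merge (illustrative only).
    inline void merge_s32_row(int32_t *outrow, const int32_t *inrow,
                              const int32_t *biasrow, // points at a zeroed buffer when bias == nullptr
                              int x, int xmax, int width, bool append)
    {
        for (int c = 0; c < width && (x + c) < xmax; c++) {
            if (append) {
                outrow[c] += inrow[c];             // append mode: accumulate, as in "*outptr0 += inptr[xi]"
            } else {
                outrow[c] = inrow[c] + biasrow[c]; // assumed: result plus (possibly null) bias, no activation for int32
            }
        }
    }
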
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_s32_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_s32_12x8.hpp
new file mode 100644
index 0000000..313f829
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_s32_12x8.hpp
@@ -0,0 +1,1595 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+template<>
+void MergeResults<12, 8, false>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t *bias, Activation act, bool append)
+{
+    UNUSED(act);
+
+    const int32_t *inptr = in;
+    int32_t nullbias[12] = { 0 };
+
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (12 * sizeof(int32_t)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
+        int32_t *outptr0 = out + (y * ldout) + x0;
+        int32_t *outptr1 = outptr0 + ldout;
+        int32_t *outptr2 = outptr1 + ldout;
+        int32_t *outptr3 = outptr2 + ldout;
+        int32_t *outptr4 = outptr3 + ldout;
+        int32_t *outptr5 = outptr4 + ldout;
+        int32_t *outptr6 = outptr5 + ldout;
+        int32_t *outptr7 = outptr6 + ldout;
+
+        const int height = ymax - y;
+
+        for (int i=x0; i<xmax; i+=12)
+        {
+            if (append)
+            {
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "str q13, [%[outptr1]]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 4:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 5:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 6:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 += inptr[xi + 60];
+                                    outptr5++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "str q16, [%[outptr2]]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 7:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 += inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 += inptr[xi + 72];
+                                    outptr6++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "str q16, [%[outptr2]]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q12, [%[outptr6]]\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 += inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 += inptr[xi + 72];
+                                    outptr6++;
+                                    *outptr7 += inptr[xi + 84];
+                                    outptr7++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "ldr q7, [%[outptr7]]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q12, [%[outptr6]]\n"
+                                "ldr q15, [%[inptr], #0x150]\n"
+                                "ldr q8, [%[outptr7], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x160]\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr7], #0x20]\n"
+                                "ldr q17, [%[inptr], #0x170]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q15, [%[outptr7]]\n"
+                                "str q16, [%[outptr7], #0x10]\n"
+                                "str q17, [%[outptr7], #0x20]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+            else
+            {
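+                /* Non-append mode: write bias + result. If no bias was
+                 * supplied, biasptr falls back to the zero-filled nullbias
+                 * buffer so the same add-and-store path applies. */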
+                const int32_t *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
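+                /* Dispatch on the number of valid output rows in this block;
+                 * each case writes bias + result for just those rows. */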
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                }
+                            }
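+                            /* The intermediate results are stored as full
+                             * 12x8 blocks, so always advance inptr by a whole
+                             * block (96 values) even when fewer columns or
+                             * rows were written. */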
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
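+                            /* The three bias vectors (q2-q4, covering all 12
+                             * columns) are loaded once and reused for every
+                             * row; prefetches for the next input and output
+                             * blocks are interleaved with the arithmetic. */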
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "str q19, [%[outptr2]]\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 4:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "str q19, [%[outptr2]]\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "str q14, [%[outptr3]]\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 5:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "str q14, [%[outptr3]]\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "str q17, [%[outptr4]]\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 6:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 = biasptr[xi] + inptr[xi + 60];
+                                    outptr5++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v20.4s, v20.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "str q14, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v3.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v4.4s\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "str q17, [%[outptr4]]\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "str q20, [%[outptr5]]\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 7:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 = biasptr[xi] + inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 = biasptr[xi] + inptr[xi + 72];
+                                    outptr6++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v20.4s, v20.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "str q14, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "add v13.4s, v13.4s, v3.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v15.4s, v15.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "str q17, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v16.4s, v16.4s, v3.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v4.4s\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "str q20, [%[outptr5]]\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "str q15, [%[outptr6]]\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 = biasptr[xi] + inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 = biasptr[xi] + inptr[xi + 72];
+                                    outptr6++;
+                                    *outptr7 = biasptr[xi] + inptr[xi + 84];
+                                    outptr7++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v20.4s, v20.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "str q14, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "add v13.4s, v13.4s, v3.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v15.4s, v15.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "str q17, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "add v16.4s, v16.4s, v3.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v4.4s\n"
+                                "ldr q18, [%[inptr], #0x150]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v18.4s, v18.4s, v2.4s\n"
+                                "ldr q19, [%[inptr], #0x160]\n"
+                                "str q20, [%[outptr5]]\n"
+                                "ldr q20, [%[inptr], #0x170]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v19.4s, v19.4s, v3.4s\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "add v20.4s, v20.4s, v4.4s\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "str q15, [%[outptr6]]\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "str q18, [%[outptr7]]\n"
+                                "str q19, [%[outptr7], #0x10]\n"
+                                "str q20, [%[outptr7], #0x20]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+        }
+    }
+}
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_s32_4x4.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_s32_4x4.hpp
new file mode 100644
index 0000000..a93060f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_s32_4x4.hpp
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+template<>
+void MergeResults<4, 4, false>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t *bias, Activation act, bool append)
+{
+    UNUSED(act);
+
+    const int32_t *inptr = in;
+    int32_t nullbias[4] = { 0 };
+
+
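+    /* If we are not appending and no bias was supplied, point at this zeroed array so the merge can still add a bias term unconditionally. */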
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (4 * sizeof(int32_t)));
+    }
+
+    for (int y=y0; y<ymax; y+=4)
+    {
+        int32_t *outptr0 = out + (y * ldout) + x0;
+        int32_t *outptr1 = outptr0 + ldout;
+        int32_t *outptr2 = outptr1 + ldout;
+        int32_t *outptr3 = outptr2 + ldout;
+
+        const int height = ymax - y;
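+        /* Rows remaining in this strip; the switches below select a routine for 1-4 rows (the default case handles a full 4). */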
+
+        for (int i=x0; i<xmax; i+=4)
+        {
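+            /* Append mode accumulates the GEMM result onto the existing output; otherwise the output is written as input plus the (possibly zero) bias. */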
+            if (append)
+            {
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+3) >= xmax)
+                        {
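+                            /* Ragged X: fewer than four valid columns remain, so merge them one element at a time. */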
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 4];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q3, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "str q11, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 8];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q3, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "str q12, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 4:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 8];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 12];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q3, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x20]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr3]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "str q12, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q13, [%[outptr3]]\n"
+                                "add %[outptr3], %[outptr3], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+            else
+            {
+                const int32_t *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 4];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v12.4s, v12.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "str q12, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 8];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x20]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x20]\n"
+                                "add v12.4s, v12.4s, v2.4s\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "str q12, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "str q13, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 4:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 8];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 12];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x20]\n"
+                                "add v12.4s, v12.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "str q12, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "str q13, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                                "str q14, [%[outptr3]]\n"
+                                "add %[outptr3], %[outptr3], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+        }
+    }
+}
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_u32_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_u32_12x8.hpp
new file mode 100644
index 0000000..5569f51
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_u32_12x8.hpp
@@ -0,0 +1,1595 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+template<>
+void MergeResults<12, 8, false>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t *bias, Activation act, bool append)
+{
+    UNUSED(act);
+
+    const uint32_t *inptr = in;
+    uint32_t nullbias[12] = { 0 };
+
+
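+    /* As in the other merges, a zeroed bias array stands in when no bias is supplied and append mode is off. */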
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (12 * sizeof(uint32_t)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
+        uint32_t *outptr0 = out + (y * ldout) + x0;
+        uint32_t *outptr1 = outptr0 + ldout;
+        uint32_t *outptr2 = outptr1 + ldout;
+        uint32_t *outptr3 = outptr2 + ldout;
+        uint32_t *outptr4 = outptr3 + ldout;
+        uint32_t *outptr5 = outptr4 + ldout;
+        uint32_t *outptr6 = outptr5 + ldout;
+        uint32_t *outptr7 = outptr6 + ldout;
+
+        const int height = ymax - y;
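+        /* Rows remaining in this strip; the switches below select a routine for 1-8 rows (the default case handles a full 8). */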
+
+        for (int i=x0; i<xmax; i+=12)
+        {
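+            /* Append mode accumulates onto the existing output; otherwise the result is input plus the (possibly zero) bias. */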
+            if (append)
+            {
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "str q13, [%[outptr1]]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 4:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 5:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 6:
+                    {
+                        if ((i+11) >= xmax)
+                        {
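+                            /* Ragged tail: accumulate only the valid columns, element by element. */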
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 += inptr[xi + 60];
+                                    outptr5++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to accumulate an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "str q16, [%[outptr2]]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 7:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 += inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 += inptr[xi + 72];
+                                    outptr6++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to accumulate an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "str q16, [%[outptr2]]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q12, [%[outptr6]]\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
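+                        /* Full height: all eight output rows of the 12x8 block are active. */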
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 += inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 += inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 += inptr[xi + 72];
+                                    outptr6++;
+                                    *outptr7 += inptr[xi + 84];
+                                    outptr7++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to accumulate an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q3, [%[outptr0], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr0], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr0], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                                "ldr q6, [%[outptr1], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q12, [%[outptr0], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x40]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "ldr q7, [%[outptr1], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q13, [%[outptr1]]\n"
+                                "ldr q15, [%[inptr], #0x50]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "ldr q8, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q14, [%[outptr1], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x60]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "ldr q9, [%[outptr2], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q15, [%[outptr1], #0x20]\n"
+                                "ldr q17, [%[inptr], #0x70]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "ldr q2, [%[outptr2], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q16, [%[outptr2]]\n"
+                                "ldr q10, [%[inptr], #0x80]\n"
+                                "ldr q3, [%[outptr3]]\n"
+                                "ldr q11, [%[inptr], #0x90]\n"
+                                "str q17, [%[outptr2], #0x10]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q4, [%[outptr3], #0x10]\n"
+                                "ldr q12, [%[inptr], #0xa0]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "ldr q5, [%[outptr3], #0x20]\n"
+                                "ldr q13, [%[inptr], #0xb0]\n"
+                                "str q10, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr4]]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0xc0]\n"
+                                "ldr q7, [%[outptr4], #0x10]\n"
+                                "ldr q15, [%[inptr], #0xd0]\n"
+                                "str q12, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "ldr q8, [%[outptr4], #0x20]\n"
+                                "ldr q16, [%[inptr], #0xe0]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "str q13, [%[outptr3], #0x20]\n"
+                                "ldr q9, [%[outptr5]]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0xf0]\n"
+                                "ldr q2, [%[outptr5], #0x10]\n"
+                                "ldr q10, [%[inptr], #0x100]\n"
+                                "str q15, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "ldr q3, [%[outptr5], #0x20]\n"
+                                "ldr q11, [%[inptr], #0x110]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q16, [%[outptr4], #0x20]\n"
+                                "ldr q4, [%[outptr6]]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q17, [%[outptr5]]\n"
+                                "ldr q12, [%[inptr], #0x120]\n"
+                                "ldr q5, [%[outptr6], #0x10]\n"
+                                "ldr q13, [%[inptr], #0x130]\n"
+                                "str q10, [%[outptr5], #0x10]\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "ldr q6, [%[outptr6], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x140]\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q11, [%[outptr5], #0x20]\n"
+                                "ldr q7, [%[outptr7]]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "add v14.4s, v14.4s, v6.4s\n"
+                                "str q12, [%[outptr6]]\n"
+                                "ldr q15, [%[inptr], #0x150]\n"
+                                "ldr q8, [%[outptr7], #0x10]\n"
+                                "ldr q16, [%[inptr], #0x160]\n"
+                                "str q13, [%[outptr6], #0x10]\n"
+                                "add v15.4s, v15.4s, v7.4s\n"
+                                "ldr q9, [%[outptr7], #0x20]\n"
+                                "ldr q17, [%[inptr], #0x170]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v16.4s, v16.4s, v8.4s\n"
+                                "str q14, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "add v17.4s, v17.4s, v9.4s\n"
+                                "str q15, [%[outptr7]]\n"
+                                "str q16, [%[outptr7], #0x10]\n"
+                                "str q17, [%[outptr7], #0x20]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                }
+            }
+            else
+            {
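+                /* First pass (no append): write bias + accumulator directly to the
+                   output; when no bias is supplied, the zero-filled nullbias is used. */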
+                const uint32_t *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
+                switch(height)
+                {
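+                /* Dispatch on the number of valid output rows in this block. */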
+                case 1:
+                    {
+                        if ((i+11) >= xmax)
+                        {
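+                            /* Ragged tail: write bias + result for the valid columns only. */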
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "str q19, [%[outptr2]]\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 4:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "str q16, [%[outptr1]]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "str q19, [%[outptr2]]\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "str q14, [%[outptr3]]\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 5:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "str q14, [%[outptr3]]\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "str q17, [%[outptr4]]\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 6:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 = biasptr[xi] + inptr[xi + 60];
+                                    outptr5++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v20.4s, v20.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "str q14, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v13.4s, v13.4s, v3.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v4.4s\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "str q17, [%[outptr4]]\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "str q20, [%[outptr5]]\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 7:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 = biasptr[xi] + inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 = biasptr[xi] + inptr[xi + 72];
+                                    outptr6++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to add the bias to an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v20.4s, v20.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "str q14, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "add v13.4s, v13.4s, v3.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v15.4s, v15.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "str q17, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v16.4s, v16.4s, v3.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v4.4s\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "str q20, [%[outptr5]]\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "str q15, [%[outptr6]]\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
+                        if ((i+11) >= xmax)
+                        {
+                            for (int xi=0; xi<11; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 12];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 24];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 36];
+                                    outptr3++;
+                                    *outptr4 = biasptr[xi] + inptr[xi + 48];
+                                    outptr4++;
+                                    *outptr5 = biasptr[xi] + inptr[xi + 60];
+                                    outptr5++;
+                                    *outptr6 = biasptr[xi] + inptr[xi + 72];
+                                    outptr6++;
+                                    *outptr7 = biasptr[xi] + inptr[xi + 84];
+                                    outptr7++;
+                                }
+                            }
+                            inptr += 96;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                                "ldr q3, [%[biasptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                                "ldr q4, [%[biasptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                                "ldr q13, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                                "ldr q14, [%[inptr], #0x10]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "ldr q15, [%[inptr], #0x20]\n"
+                                "ldr q16, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                                "add v14.4s, v14.4s, v3.4s\n"
+                                "str q13, [%[outptr0]]\n"
+                                "add v15.4s, v15.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0x40]\n"
+                                "add v16.4s, v16.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0x50]\n"
+                                "ldr q19, [%[inptr], #0x60]\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                                "add v17.4s, v17.4s, v3.4s\n"
+                                "str q14, [%[outptr0], #0x10]\n"
+                                "add v18.4s, v18.4s, v4.4s\n"
+                                "ldr q20, [%[inptr], #0x70]\n"
+                                "add v19.4s, v19.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x80]\n"
+                                "ldr q14, [%[inptr], #0x90]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                                "add v20.4s, v20.4s, v3.4s\n"
+                                "str q15, [%[outptr0], #0x20]\n"
+                                "add v13.4s, v13.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0xa0]\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x30\n"
+                                "str q16, [%[outptr1]]\n"
+                                "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                                "add v15.4s, v15.4s, v3.4s\n"
+                                "ldr q16, [%[inptr], #0xb0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                                "str q17, [%[outptr1], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                                "add v16.4s, v16.4s, v4.4s\n"
+                                "ldr q17, [%[inptr], #0xc0]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                                "str q18, [%[outptr1], #0x20]\n"
+                                "add %[outptr1], %[outptr1], #0x30\n"
+                                "add v17.4s, v17.4s, v2.4s\n"
+                                "ldr q18, [%[inptr], #0xd0]\n"
+                                "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                                "str q19, [%[outptr2]]\n"
+                                "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                                "add v18.4s, v18.4s, v3.4s\n"
+                                "ldr q19, [%[inptr], #0xe0]\n"
+                                "str q20, [%[outptr2], #0x10]\n"
+                                "ldr q20, [%[inptr], #0xf0]\n"
+                                "add v19.4s, v19.4s, v4.4s\n"
+                                "str q13, [%[outptr2], #0x20]\n"
+                                "add %[outptr2], %[outptr2], #0x30\n"
+                                "add v20.4s, v20.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x100]\n"
+                                "str q14, [%[outptr3]]\n"
+                                "ldr q14, [%[inptr], #0x110]\n"
+                                "add v13.4s, v13.4s, v3.4s\n"
+                                "str q15, [%[outptr3], #0x10]\n"
+                                "add v14.4s, v14.4s, v4.4s\n"
+                                "ldr q15, [%[inptr], #0x120]\n"
+                                "str q16, [%[outptr3], #0x20]\n"
+                                "add %[outptr3], %[outptr3], #0x30\n"
+                                "add v15.4s, v15.4s, v2.4s\n"
+                                "ldr q16, [%[inptr], #0x130]\n"
+                                "str q17, [%[outptr4]]\n"
+                                "ldr q17, [%[inptr], #0x140]\n"
+                                "add v16.4s, v16.4s, v3.4s\n"
+                                "str q18, [%[outptr4], #0x10]\n"
+                                "add v17.4s, v17.4s, v4.4s\n"
+                                "ldr q18, [%[inptr], #0x150]\n"
+                                "str q19, [%[outptr4], #0x20]\n"
+                                "add %[outptr4], %[outptr4], #0x30\n"
+                                "add v18.4s, v18.4s, v2.4s\n"
+                                "ldr q19, [%[inptr], #0x160]\n"
+                                "str q20, [%[outptr5]]\n"
+                                "ldr q20, [%[inptr], #0x170]\n"
+                                "add %[inptr], %[inptr], #0x180\n"
+                                "add v19.4s, v19.4s, v3.4s\n"
+                                "str q13, [%[outptr5], #0x10]\n"
+                                "add v20.4s, v20.4s, v4.4s\n"
+                                "str q14, [%[outptr5], #0x20]\n"
+                                "add %[outptr5], %[outptr5], #0x30\n"
+                                "str q15, [%[outptr6]]\n"
+                                "str q16, [%[outptr6], #0x10]\n"
+                                "str q17, [%[outptr6], #0x20]\n"
+                                "add %[outptr6], %[outptr6], #0x30\n"
+                                "str q18, [%[outptr7]]\n"
+                                "str q19, [%[outptr7], #0x10]\n"
+                                "str q20, [%[outptr7], #0x20]\n"
+                                "add %[outptr7], %[outptr7], #0x30\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+        }
+    }
+}
+
+#endif // __aarch64__
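
For reference, the bias (non-append) cases above all reduce to the same scalar computation: each of the `height` output rows receives the corresponding row of the packed result block plus the per-column bias, with rows in `inptr` strided by the block width; the ragged-edge loops spell this out and the inline assembly is a vectorised form of the same thing. A reference-only sketch (hypothetical helper name; element type left generic, since the kernel above uses plain integer adds):

    // Reference-only sketch of the bias path of the block merges above: write each row of
    // the packed block as bias + partial result.  block_width is 12 for the kernel above.
    template <typename T>
    void reference_merge_bias(T *const *outptr, const T *inptr, const T *biasptr,
                              int height, int valid_cols, int block_width)
    {
        for (int r = 0; r < height; r++) {
            for (int x = 0; x < valid_cols; x++) {
                outptr[r][x] = biasptr[x] + inptr[r * block_width + x];
            }
        }
    }
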
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_u32_4x4.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_u32_4x4.hpp
new file mode 100644
index 0000000..fd01bb2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_u32_4x4.hpp
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+template<>
+void MergeResults<4, 4, false>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t *bias, Activation act, bool append)
+{
+    UNUSED(act);
+
+    const uint32_t *inptr = in;
+    uint32_t nullbias[4] = { 0 };
+
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (4 * sizeof(uint32_t)));
+    }
+
+    for (int y=y0; y<ymax; y+=4)
+    {
+        uint32_t *outptr0 = out + (y * ldout) + x0;
+        uint32_t *outptr1 = outptr0 + ldout;
+        uint32_t *outptr2 = outptr1 + ldout;
+        uint32_t *outptr3 = outptr2 + ldout;
+
+        const int height = ymax - y;
+
+        for (int i=x0; i<xmax; i+=4)
+        {
+            if (append)
+            {
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 4];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q3, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "str q11, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 8];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q3, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "str q12, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 4:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 += inptr[xi];
+                                    outptr0++;
+                                    *outptr1 += inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 += inptr[xi + 8];
+                                    outptr2++;
+                                    *outptr3 += inptr[xi + 12];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[outptr0]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q10, [%[inptr]]\n"
+                                "prfm PLDL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q3, [%[outptr1]]\n"
+                                "prfm PLDL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v10.4s, v10.4s, v2.4s\n"
+                                "ldr q11, [%[inptr], #0x10]\n"
+                                "ldr q4, [%[outptr2]]\n"
+                                "prfm PLDL1KEEP, [%[outptr2], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x20]\n"
+                                "prfm PLDL1KEEP, [%[outptr3], #0x20]\n"
+                                "add v11.4s, v11.4s, v3.4s\n"
+                                "str q10, [%[outptr0]]\n"
+                                "ldr q5, [%[outptr3]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "add v12.4s, v12.4s, v4.4s\n"
+                                "str q11, [%[outptr1]]\n"
+                                "ldr q13, [%[inptr], #0x30]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "str q12, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                                "add v13.4s, v13.4s, v5.4s\n"
+                                "str q13, [%[outptr3]]\n"
+                                "add %[outptr3], %[outptr3], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            :
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+            else
+            {
+                const uint32_t *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
+                switch(height)
+                {
+                case 1:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 2:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 4];
+                                    outptr1++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v12.4s, v12.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "str q12, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                case 3:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 8];
+                                    outptr2++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x20]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x20]\n"
+                                "add v12.4s, v12.4s, v2.4s\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "str q12, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "str q13, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+                default:
+                case 4:
+                    {
+                        if ((i+3) >= xmax)
+                        {
+                            for (int xi=0; xi<3; xi++)
+                            {
+                                if ((i+xi) < xmax)
+                                {
+                                    *outptr0 = biasptr[xi] + inptr[xi];
+                                    outptr0++;
+                                    *outptr1 = biasptr[xi] + inptr[xi + 4];
+                                    outptr1++;
+                                    *outptr2 = biasptr[xi] + inptr[xi + 8];
+                                    outptr2++;
+                                    *outptr3 = biasptr[xi] + inptr[xi + 12];
+                                    outptr3++;
+                                }
+                            }
+                            inptr += 16;
+                        } else {
+                            /* Optimized routine to copy an entire block */
+                            __asm __volatile (
+                                "ldr q2, [%[biasptr]]\n"
+                                "prfm PLDL1KEEP, [%[inptr], #0x40]\n"
+                                "ldr q11, [%[inptr]]\n"
+                                "prfm PSTL1KEEP, [%[outptr0], #0x20]\n"
+                                "ldr q12, [%[inptr], #0x10]\n"
+                                "prfm PSTL1KEEP, [%[outptr1], #0x20]\n"
+                                "add v11.4s, v11.4s, v2.4s\n"
+                                "ldr q13, [%[inptr], #0x20]\n"
+                                "ldr q14, [%[inptr], #0x30]\n"
+                                "prfm PSTL1KEEP, [%[outptr2], #0x20]\n"
+                                "add v12.4s, v12.4s, v2.4s\n"
+                                "str q11, [%[outptr0]]\n"
+                                "add v13.4s, v13.4s, v2.4s\n"
+                                "add %[outptr0], %[outptr0], #0x10\n"
+                                "add v14.4s, v14.4s, v2.4s\n"
+                                "str q12, [%[outptr1]]\n"
+                                "add %[outptr1], %[outptr1], #0x10\n"
+                                "prfm PSTL1KEEP, [%[outptr3], #0x20]\n"
+                                "add %[inptr], %[inptr], #0x40\n"
+                                "str q13, [%[outptr2]]\n"
+                                "add %[outptr2], %[outptr2], #0x10\n"
+                                "str q14, [%[outptr3]]\n"
+                                "add %[outptr3], %[outptr3], #0x10\n"
+                            : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+                              [inptr] "+r" (inptr)
+                            : [biasptr] "r" (biasptr)
+                            : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "memory"
+                            );
+                        }
+                    }
+                    break;
+
+
+                }
+            }
+        }
+    }
+}
+
+#endif // __aarch64__
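
The 4x4 uint32_t merge above ignores the activation (UNUSED(act)) and only distinguishes the two write modes: append accumulates onto the existing output, while the bias path overwrites it with bias + partial result. A reference-only sketch of the per-block computation (hypothetical helper name):

    #include <cstdint>

    // Sketch of the two modes of the 4x4 uint32_t merge: rows of the packed block are
    // strided by 4; `append` selects accumulate-into-output versus bias + result.
    inline void reference_merge_u32_4x4(uint32_t *const *outptr, const uint32_t *inptr,
                                        const uint32_t *biasptr, int height, int valid_cols,
                                        bool append)
    {
        for (int r = 0; r < height; r++) {
            for (int x = 0; x < valid_cols; x++) {
                const uint32_t v = inptr[r * 4 + x];
                outptr[r][x] = append ? (outptr[r][x] + v) : (biasptr[x] + v);
            }
        }
    }

The `(i+3) >= xmax` test selects the scalar loop only for the final, partial block: with xmax - i == 2, for example, just two columns are valid, so the scalar loop writes them and the vectorised routine is skipped.
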
diff --git a/src/core/NEON/kernels/arm_gemm/merges/list.hpp b/src/core/NEON/kernels/arm_gemm/merges/list.hpp
index 788a957..4edb497 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/list.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/list.hpp
@@ -22,10 +22,13 @@
  * SOFTWARE.
  */
 #include "a32_merge_float_8x6.hpp"
-#include "a64_merge_float_12x8.hpp"
-#include "a64_merge_float_to_half_12x8.hpp"
-#include "a64_merge_half_24x8.hpp"
-#include "a64_merge_int32_12x8.hpp"
+#include "a64_merge_fp16_24x8.hpp"
+#include "a64_merge_fp32_12x8.hpp"
+#include "a64_merge_s32_12x8.hpp"
+#include "a64_merge_s32_4x4.hpp"
+#include "a64_merge_u32_12x8.hpp"
+#include "a64_merge_u32_4x4.hpp"
+#include "sve_merge_fp16_3VLx8.hpp"
 #include "sve_merge_fp32_3VLx8.hpp"
 #include "sve_merge_s32_3VLx8.hpp"
 #include "sve_merge_u32_3VLx8.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp16_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp16_3VLx8.hpp
new file mode 100644
index 0000000..e5efc09
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp16_3VLx8.hpp
@@ -0,0 +1,1879 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+void MergeResults<3, 8, true>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const __fp16 *bias, Activation act, bool append)
+{
+    const __fp16 *inptr = in;
+    __fp16 nullbias[384] = { 0 };
+    __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
+    __fp16 maxval =   static_cast<__fp16>(std::numeric_limits<float>::infinity());
+
+    switch(act.type)
+    {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            maxval = static_cast<__fp16>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            minval = 0.0f;
+            break;
+    }
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (3 * get_vector_length<__fp16>() * sizeof(__fp16)));
+    }
+
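+    /* Each case below handles one block of up to 3 SVE vectors of __fp16 per row.  The
+     * whilelt/inch sequence builds predicates p0-p2 over the remaining columns (w = xmax - i),
+     * so partial blocks need no scalar tail; x8 is set 16 vectors past inptr so that rows
+     * whose data lies beyond the +7 MUL VL immediate range of ld1h can still be loaded.
+     * In append mode the current output is loaded, the partial result is added, and the sum
+     * is clamped to [minval, maxval] (fmin/fmax) before being stored back. */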
+    for (int y=y0; y<ymax; y+=8)
+    {
+        __fp16 *outptr0 = out + (y * ldout) + x0;
+        __fp16 *outptr1 = outptr0 + ldout;
+        __fp16 *outptr2 = outptr1 + ldout;
+        __fp16 *outptr3 = outptr2 + ldout;
+        __fp16 *outptr4 = outptr3 + ldout;
+        __fp16 *outptr5 = outptr4 + ldout;
+        __fp16 *outptr6 = outptr5 + ldout;
+        __fp16 *outptr7 = outptr6 + ldout;
+
+        const int height = ymax - y;
+
+        for (int i=x0; i<xmax; i+=(3 * get_vector_length<__fp16>()))
+        {
+            if (append)
+            {
+                switch(height)
+                {
+                case 1:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 2:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 3:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z8.h, p0/z, [%[outptr2]]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "ld1h z9.h, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "ld1h z2.h, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z10.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z10.h, p2/m, z10.h, z0.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z16.h, p0, [%[outptr2]]\n"
+                            "fmax z10.h, p2/m, z10.h, z1.h\n"
+                            "st1h z17.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1h z10.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 4:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z8.h, p0/z, [%[outptr2]]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "ld1h z9.h, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "ld1h z2.h, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z10.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "ld1h z3.h, p0/z, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.h, p2/m, z10.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z10.h, p2/m, z10.h, z1.h\n"
+                            "st1h z16.h, p0, [%[outptr2]]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z11.h, p0/m, z11.h, z0.h\n"
+                            "st1h z17.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z12.h, p1/m, z12.h, z0.h\n"
+                            "st1h z10.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmax z11.h, p0/m, z11.h, z1.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "fmax z12.h, p1/m, z12.h, z1.h\n"
+                            "st1h z11.h, p0, [%[outptr3]]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z12.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1h z13.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
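+                /* The height-specialised blocks in this switch implement the
+                 * append path: the partial result already held in the output
+                 * rows is reloaded, the freshly computed tile from inptr is
+                 * added to it, and the activation is applied as a min/max
+                 * clamp (z0 = maxval, z1 = minval) before storing, roughly:
+                 *   outptrN[x] = min(max(minval, outptrN[x] + inptr[...]), maxval);
+                 */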
+                case 5:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to accumulate an entire block into the output and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z8.h, p0/z, [%[outptr2]]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "ld1h z9.h, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "ld1h z2.h, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z10.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "ld1h z3.h, p0/z, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "ld1h z6.h, p0/z, [%[outptr4]]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.h, p2/m, z10.h, z0.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z7.h, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.h, p2/m, z10.h, z1.h\n"
+                            "ld1h z8.h, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.h, p0/m, z11.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr2]]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.h, p1/m, z12.h, z0.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z11.h, p0/m, z11.h, z1.h\n"
+                            "st1h z17.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z12.h, p1/m, z12.h, z1.h\n"
+                            "st1h z10.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z11.h, p0, [%[outptr3]]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "st1h z12.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "st1h z13.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "st1h z14.h, p0, [%[outptr4]]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "st1h z15.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1h z16.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 6:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to accumulate an entire block into the output and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z8.h, p0/z, [%[outptr2]]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "ld1h z9.h, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "ld1h z2.h, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z10.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "ld1h z3.h, p0/z, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "ld1h z6.h, p0/z, [%[outptr4]]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.h, p2/m, z10.h, z0.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z7.h, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.h, p2/m, z10.h, z1.h\n"
+                            "ld1h z8.h, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.h, p0/m, z11.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr2]]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.h, p1/m, z12.h, z0.h\n"
+                            "ld1h z9.h, p0/z, [%[outptr5]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "st1h z17.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z11.h, p0/m, z11.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "ld1h z2.h, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z12.h, p1/m, z12.h, z1.h\n"
+                            "st1h z10.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "ld1h z10.h, p1/z, [x8]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z3.h, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z11.h, p0, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p2/z, [x8, #1, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "st1h z12.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "st1h z13.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "st1h z14.h, p0, [%[outptr4]]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z15.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "st1h z16.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z10.h, p1/m, z10.h, z0.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmin z11.h, p2/m, z11.h, z0.h\n"
+                            "st1h z17.h, p0, [%[outptr5]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmax z10.h, p1/m, z10.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z11.h, p2/m, z11.h, z1.h\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "st1h z10.h, p1, [%[outptr5], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "st1h z11.h, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 7:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to accumulate an entire block into the output and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z8.h, p0/z, [%[outptr2]]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "ld1h z9.h, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "ld1h z2.h, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z10.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "ld1h z3.h, p0/z, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "ld1h z6.h, p0/z, [%[outptr4]]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.h, p2/m, z10.h, z0.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z7.h, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.h, p2/m, z10.h, z1.h\n"
+                            "ld1h z8.h, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.h, p0/m, z11.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr2]]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.h, p1/m, z12.h, z0.h\n"
+                            "ld1h z9.h, p0/z, [%[outptr5]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "st1h z17.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z11.h, p0/m, z11.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "ld1h z2.h, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z12.h, p1/m, z12.h, z1.h\n"
+                            "st1h z10.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "ld1h z10.h, p1/z, [x8]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z3.h, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z11.h, p0, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p2/z, [x8, #1, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p0/z, [%[outptr6]]\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "st1h z12.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p0/z, [x8, #2, MUL VL]\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p1/z, [x8, #3, MUL VL]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "ld1h z6.h, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "st1h z14.h, p0, [%[outptr4]]\n"
+                            "fmin z10.h, p1/m, z10.h, z0.h\n"
+                            "ld1h z14.h, p2/z, [x8, #4, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "st1h z15.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z10.h, p1/m, z10.h, z1.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z11.h, p2/m, z11.h, z0.h\n"
+                            "st1h z16.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "st1h z17.h, p0, [%[outptr5]]\n"
+                            "fmax z11.h, p2/m, z11.h, z1.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z12.h, p0/m, z12.h, z0.h\n"
+                            "st1h z10.h, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fmin z13.h, p1/m, z13.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "st1h z11.h, p2, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z12.h, p0/m, z12.h, z1.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z13.h, p1/m, z13.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z14.h, p2/m, z14.h, z0.h\n"
+                            "st1h z12.h, p0, [%[outptr6]]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fmax z14.h, p2/m, z14.h, z1.h\n"
+                            "st1h z13.h, p1, [%[outptr6], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                            "st1h z14.h, p2, [%[outptr6], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
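+                /* inptr is laid out as a 24-vector tile (8 rows of 3 vectors)
+                 * and advances by 24 vector-lengths per block.  Because the
+                 * LD1H immediate offset only reaches -8..+7 vectors,
+                 * "addvl x8, %[inptr], #16" provides a second base register
+                 * so the upper half of the tile can be addressed via x8. */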
+                default:
+                case 8:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to accumulate an entire block into the output and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z10.h, p0/z, [%[inptr]]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z5.h, p0/z, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "ld1h z3.h, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1h z11.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z13.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.h, p0/m, z10.h, z0.h\n"
+                            "ld1h z4.h, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z12.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z6.h, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fmax z10.h, p0/m, z10.h, z1.h\n"
+                            "ld1h z7.h, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.h, p1/m, z11.h, z0.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z8.h, p0/z, [%[outptr2]]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "st1h z10.h, p0, [%[outptr0]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.h, p1/m, z11.h, z1.h\n"
+                            "ld1h z9.h, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.h, p2/m, z12.h, z0.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "ld1h z2.h, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "st1h z11.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z10.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.h, p2/m, z12.h, z1.h\n"
+                            "ld1h z3.h, p0/z, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z12.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p0, [%[outptr1]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "ld1h z6.h, p0/z, [%[outptr4]]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.h, p2/m, z10.h, z0.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z7.h, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z15.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.h, p2/m, z10.h, z1.h\n"
+                            "ld1h z8.h, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.h, p0/m, z11.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr2]]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.h, p1/m, z12.h, z0.h\n"
+                            "ld1h z9.h, p0/z, [%[outptr5]]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "st1h z17.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z11.h, p0/m, z11.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "ld1h z2.h, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z12.h, p1/m, z12.h, z1.h\n"
+                            "st1h z10.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "ld1h z10.h, p1/z, [x8]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "ld1h z3.h, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z11.h, p0, [%[outptr3]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "ld1h z11.h, p2/z, [x8, #1, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "ld1h z4.h, p0/z, [%[outptr6]]\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "st1h z12.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "ld1h z12.h, p0/z, [x8, #2, MUL VL]\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "ld1h z5.h, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "fadd z10.h, z10.h, z2.h\n"
+                            "st1h z13.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "ld1h z13.h, p1/z, [x8, #3, MUL VL]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "ld1h z6.h, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "st1h z14.h, p0, [%[outptr4]]\n"
+                            "fmin z10.h, p1/m, z10.h, z0.h\n"
+                            "ld1h z14.h, p2/z, [x8, #4, MUL VL]\n"
+                            "fadd z11.h, z11.h, z3.h\n"
+                            "ld1h z7.h, p0/z, [%[outptr7]]\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "st1h z15.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z12.h, z12.h, z4.h\n"
+                            "ld1h z15.h, p0/z, [x8, #5, MUL VL]\n"
+                            "fmax z10.h, p1/m, z10.h, z1.h\n"
+                            "ld1h z8.h, p1/z, [%[outptr7], #1, MUL VL]\n"
+                            "fmin z11.h, p2/m, z11.h, z0.h\n"
+                            "st1h z16.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fadd z13.h, z13.h, z5.h\n"
+                            "ld1h z16.h, p1/z, [x8, #6, MUL VL]\n"
+                            "fmin z12.h, p0/m, z12.h, z0.h\n"
+                            "ld1h z9.h, p2/z, [%[outptr7], #2, MUL VL]\n"
+                            "fadd z14.h, z14.h, z6.h\n"
+                            "st1h z17.h, p0, [%[outptr5]]\n"
+                            "fmax z11.h, p2/m, z11.h, z1.h\n"
+                            "ld1h z17.h, p2/z, [x8, #7, MUL VL]\n"
+                            "fmin z13.h, p1/m, z13.h, z0.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z12.h, p0/m, z12.h, z1.h\n"
+                            "st1h z10.h, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fmin z14.h, p2/m, z14.h, z0.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z13.h, p1/m, z13.h, z1.h\n"
+                            "st1h z11.h, p2, [%[outptr5], #2, MUL VL]\n"
+                            "fadd z15.h, z15.h, z7.h\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z14.h, p2/m, z14.h, z1.h\n"
+                            "st1h z12.h, p0, [%[outptr6]]\n"
+                            "fadd z16.h, z16.h, z8.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z15.h, p0/m, z15.h, z0.h\n"
+                            "st1h z13.h, p1, [%[outptr6], #1, MUL VL]\n"
+                            "fadd z17.h, z17.h, z9.h\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z16.h, p1/m, z16.h, z0.h\n"
+                            "st1h z14.h, p2, [%[outptr6], #2, MUL VL]\n"
+                            "fmax z15.h, p0/m, z15.h, z1.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmin z17.h, p2/m, z17.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmax z16.h, p1/m, z16.h, z1.h\n"
+                            "st1h z15.h, p0, [%[outptr7]]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z17.h, p2/m, z17.h, z1.h\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "st1h z16.h, p1, [%[outptr7], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "st1h z17.h, p2, [%[outptr7], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                }
+            }
+            else
+            {
+                const __fp16 *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
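+                /* Non-append path: up to three bias vectors are loaded once
+                 * from biasptr (or from the zeroed nullbias block) and reused
+                 * for every row, so the previous contents of the output are
+                 * never read.  Per element this is roughly:
+                 *   outptrN[x] = min(max(minval, inptr[...] + biasptr[x]), maxval);
+                 */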
+                switch(height)
+                {
+                case 1:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to add the bias to an entire block and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 2:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to add the bias to an entire block and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 3:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to add the bias to an entire block and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "ld1h z19.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "ld1h z20.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.h, z19.h, z2.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "fmin z19.h, p0/m, z19.h, z0.h\n"
+                            "fadd z20.h, z20.h, z3.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fadd z13.h, z13.h, z4.h\n"
+                            "fmax z19.h, p0/m, z19.h, z1.h\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z20.h, p1/m, z20.h, z0.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "st1h z19.h, p0, [%[outptr2]]\n"
+                            "fmax z20.h, p1/m, z20.h, z1.h\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z20.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1h z13.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 4:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to add the bias to an entire block and apply the activation */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "ld1h z19.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "ld1h z20.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.h, z19.h, z2.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fadd z20.h, z20.h, z3.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z19.h, p0/m, z19.h, z0.h\n"
+                            "fmin z20.h, p1/m, z20.h, z0.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fadd z13.h, z13.h, z4.h\n"
+                            "fadd z14.h, z14.h, z2.h\n"
+                            "fmax z19.h, p0/m, z19.h, z1.h\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fmax z20.h, p1/m, z20.h, z1.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "st1h z19.h, p0, [%[outptr2]]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "fadd z15.h, z15.h, z3.h\n"
+                            "fadd z16.h, z16.h, z4.h\n"
+                            "st1h z20.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "st1h z13.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "st1h z14.h, p0, [%[outptr3]]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "st1h z15.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1h z16.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 5:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "ld1h z19.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "ld1h z20.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.h, z19.h, z2.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmin z19.h, p0/m, z19.h, z0.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.h, z20.h, z3.h\n"
+                            "fadd z13.h, z13.h, z4.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmax z19.h, p0/m, z19.h, z1.h\n"
+                            "fmin z20.h, p1/m, z20.h, z0.h\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.h, z14.h, z2.h\n"
+                            "ld1h z18.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fadd z15.h, z15.h, z3.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z20.h, p1/m, z20.h, z1.h\n"
+                            "st1h z19.h, p0, [%[outptr2]]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "ld1h z19.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "st1h z20.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fadd z16.h, z16.h, z4.h\n"
+                            "fadd z17.h, z17.h, z2.h\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "st1h z13.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "st1h z14.h, p0, [%[outptr3]]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "fadd z18.h, z18.h, z3.h\n"
+                            "fadd z19.h, z19.h, z4.h\n"
+                            "st1h z15.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "fmin z18.h, p1/m, z18.h, z0.h\n"
+                            "fmin z19.h, p2/m, z19.h, z0.h\n"
+                            "st1h z16.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z18.h, p1/m, z18.h, z1.h\n"
+                            "st1h z17.h, p0, [%[outptr4]]\n"
+                            "fmax z19.h, p2/m, z19.h, z1.h\n"
+                            "st1h z18.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1h z19.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 6:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "ld1h z19.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "ld1h z20.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.h, z19.h, z2.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmin z19.h, p0/m, z19.h, z0.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.h, z20.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z19.h, p0/m, z19.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmin z20.h, p1/m, z20.h, z0.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fadd z13.h, z13.h, z4.h\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.h, z14.h, z2.h\n"
+                            "ld1h z18.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z20.h, p1/m, z20.h, z1.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "st1h z19.h, p0, [%[outptr2]]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "ld1h z19.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fadd z15.h, z15.h, z3.h\n"
+                            "fadd z16.h, z16.h, z4.h\n"
+                            "st1h z20.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "ld1h z20.h, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "st1h z13.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z2.h\n"
+                            "ld1h z13.h, p1/z, [x8]\n"
+                            "fadd z18.h, z18.h, z3.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "st1h z14.h, p0, [%[outptr3]]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "ld1h z14.h, p2/z, [x8, #1, MUL VL]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "fmin z18.h, p1/m, z18.h, z0.h\n"
+                            "st1h z15.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z19.h, z19.h, z4.h\n"
+                            "fadd z20.h, z20.h, z2.h\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "st1h z16.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z18.h, p1/m, z18.h, z1.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmin z19.h, p2/m, z19.h, z0.h\n"
+                            "st1h z17.h, p0, [%[outptr4]]\n"
+                            "fmin z20.h, p0/m, z20.h, z0.h\n"
+                            "fadd z13.h, z13.h, z3.h\n"
+                            "fadd z14.h, z14.h, z4.h\n"
+                            "st1h z18.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z19.h, p2/m, z19.h, z1.h\n"
+                            "fmax z20.h, p0/m, z20.h, z1.h\n"
+                            "fmin z13.h, p1/m, z13.h, z0.h\n"
+                            "fmin z14.h, p2/m, z14.h, z0.h\n"
+                            "st1h z19.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "fmax z13.h, p1/m, z13.h, z1.h\n"
+                            "st1h z20.h, p0, [%[outptr5]]\n"
+                            "fmax z14.h, p2/m, z14.h, z1.h\n"
+                            "st1h z13.h, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1h z14.h, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                case 7:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "ld1h z19.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "ld1h z20.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.h, z19.h, z2.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmin z19.h, p0/m, z19.h, z0.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.h, z20.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z19.h, p0/m, z19.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmin z20.h, p1/m, z20.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                            "fadd z13.h, z13.h, z4.h\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.h, z14.h, z2.h\n"
+                            "ld1h z18.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z20.h, p1/m, z20.h, z1.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "st1h z19.h, p0, [%[outptr2]]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "ld1h z19.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fadd z15.h, z15.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z20.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "ld1h z20.h, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fadd z16.h, z16.h, z4.h\n"
+                            "st1h z13.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z2.h\n"
+                            "ld1h z13.h, p1/z, [x8]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "st1h z14.h, p0, [%[outptr3]]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "ld1h z14.h, p2/z, [x8, #1, MUL VL]\n"
+                            "fadd z18.h, z18.h, z3.h\n"
+                            "fadd z19.h, z19.h, z4.h\n"
+                            "st1h z15.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "ld1h z15.h, p0/z, [x8, #2, MUL VL]\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "fmin z18.h, p1/m, z18.h, z0.h\n"
+                            "fmin z19.h, p2/m, z19.h, z0.h\n"
+                            "st1h z16.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z20.h, z20.h, z2.h\n"
+                            "ld1h z16.h, p1/z, [x8, #3, MUL VL]\n"
+                            "fadd z13.h, z13.h, z3.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z18.h, p1/m, z18.h, z1.h\n"
+                            "st1h z17.h, p0, [%[outptr4]]\n"
+                            "fmax z19.h, p2/m, z19.h, z1.h\n"
+                            "ld1h z17.h, p2/z, [x8, #4, MUL VL]\n"
+                            "fmin z20.h, p0/m, z20.h, z0.h\n"
+                            "fmin z13.h, p1/m, z13.h, z0.h\n"
+                            "st1h z18.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z14.h, z14.h, z4.h\n"
+                            "fadd z15.h, z15.h, z2.h\n"
+                            "fmax z20.h, p0/m, z20.h, z1.h\n"
+                            "st1h z19.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fmax z13.h, p1/m, z13.h, z1.h\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "fmin z14.h, p2/m, z14.h, z0.h\n"
+                            "st1h z20.h, p0, [%[outptr5]]\n"
+                            "fmin z15.h, p0/m, z15.h, z0.h\n"
+                            "fadd z16.h, z16.h, z3.h\n"
+                            "fadd z17.h, z17.h, z4.h\n"
+                            "st1h z13.h, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z14.h, p2/m, z14.h, z1.h\n"
+                            "fmax z15.h, p0/m, z15.h, z1.h\n"
+                            "fmin z16.h, p1/m, z16.h, z0.h\n"
+                            "fmin z17.h, p2/m, z17.h, z0.h\n"
+                            "st1h z14.h, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "fmax z16.h, p1/m, z16.h, z1.h\n"
+                            "st1h z15.h, p0, [%[outptr6]]\n"
+                            "fmax z17.h, p2/m, z17.h, z1.h\n"
+                            "st1h z16.h, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1h z17.h, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+                default:
+                case 8:
+                    {
+                        long w = xmax - i;
+                        long p = 0;
+                        /* Optimized routine to copy an entire block */
+                        __asm __volatile (
+                            "mov z0.h, %h[maxval]\n"
+                            "addvl x8, %[inptr], #16\n"
+                            "mov z1.h, %h[minval]\n"
+                            "whilelt p0.h, %[p], %[w]\n"
+                            "inch %[p], all, mul #1\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1h z2.h, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.h, %[p], %[w]\n"
+                            "ld1h z3.h, p0/z, [%[biasptr], #1, MUL VL]\n"
+                            "inch %[p], all, mul #1\n"
+                            "ld1h z4.h, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+                            "ld1h z13.h, p0/z, [%[inptr]]\n"
+                            "whilelt p2.h, %[p], %[w]\n"
+                            "ld1h z14.h, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.h, z13.h, z2.h\n"
+                            "ld1h z15.h, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1h z16.h, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+                            "fadd z14.h, z14.h, z3.h\n"
+                            "ld1h z17.h, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.h, p0/m, z13.h, z0.h\n"
+                            "ld1h z18.h, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.h, z15.h, z4.h\n"
+                            "ld1h z19.h, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z2.h\n"
+                            "ld1h z20.h, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.h, p1/m, z14.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.h, p0/m, z13.h, z1.h\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.h, p1/m, z14.h, z1.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.h, p2/m, z15.h, z0.h\n"
+                            "st1h z13.h, p0, [%[outptr0]]\n"
+                            "fmin z16.h, p0/m, z16.h, z0.h\n"
+                            "ld1h z13.h, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.h, z17.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.h, p2/m, z15.h, z1.h\n"
+                            "st1h z14.h, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.h, p0/m, z16.h, z1.h\n"
+                            "ld1h z14.h, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.h, p1/m, z17.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fadd z18.h, z18.h, z4.h\n"
+                            "st1h z15.h, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.h, z19.h, z2.h\n"
+                            "ld1h z15.h, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.h, p1/m, z17.h, z1.h\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.h, p2/m, z18.h, z0.h\n"
+                            "st1h z16.h, p0, [%[outptr1]]\n"
+                            "fmin z19.h, p0/m, z19.h, z0.h\n"
+                            "ld1h z16.h, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.h, z20.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "fmax z18.h, p2/m, z18.h, z1.h\n"
+                            "st1h z17.h, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z19.h, p0/m, z19.h, z1.h\n"
+                            "ld1h z17.h, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmin z20.h, p1/m, z20.h, z0.h\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+                            "fadd z13.h, z13.h, z4.h\n"
+                            "st1h z18.h, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.h, z14.h, z2.h\n"
+                            "ld1h z18.h, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z20.h, p1/m, z20.h, z1.h\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.h, p2/m, z13.h, z0.h\n"
+                            "st1h z19.h, p0, [%[outptr2]]\n"
+                            "fmin z14.h, p0/m, z14.h, z0.h\n"
+                            "ld1h z19.h, p2/z, [x8, #-2, MUL VL]\n"
+                            "fadd z15.h, z15.h, z3.h\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "fmax z13.h, p2/m, z13.h, z1.h\n"
+                            "st1h z20.h, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z14.h, p0/m, z14.h, z1.h\n"
+                            "ld1h z20.h, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z15.h, p1/m, z15.h, z0.h\n"
+                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                            "fadd z16.h, z16.h, z4.h\n"
+                            "st1h z13.h, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z17.h, z17.h, z2.h\n"
+                            "ld1h z13.h, p1/z, [x8]\n"
+                            "fmax z15.h, p1/m, z15.h, z1.h\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z16.h, p2/m, z16.h, z0.h\n"
+                            "st1h z14.h, p0, [%[outptr3]]\n"
+                            "fmin z17.h, p0/m, z17.h, z0.h\n"
+                            "ld1h z14.h, p2/z, [x8, #1, MUL VL]\n"
+                            "fadd z18.h, z18.h, z3.h\n"
+                            "addvl %[inptr], %[inptr], #24\n"
+                            "fmax z16.h, p2/m, z16.h, z1.h\n"
+                            "st1h z15.h, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmax z17.h, p0/m, z17.h, z1.h\n"
+                            "ld1h z15.h, p0/z, [x8, #2, MUL VL]\n"
+                            "fmin z18.h, p1/m, z18.h, z0.h\n"
+                            "fadd z19.h, z19.h, z4.h\n"
+                            "st1h z16.h, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z20.h, z20.h, z2.h\n"
+                            "ld1h z16.h, p1/z, [x8, #3, MUL VL]\n"
+                            "fadd z13.h, z13.h, z3.h\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z18.h, p1/m, z18.h, z1.h\n"
+                            "st1h z17.h, p0, [%[outptr4]]\n"
+                            "fmin z19.h, p2/m, z19.h, z0.h\n"
+                            "ld1h z17.h, p2/z, [x8, #4, MUL VL]\n"
+                            "fmin z20.h, p0/m, z20.h, z0.h\n"
+                            "fmin z13.h, p1/m, z13.h, z0.h\n"
+                            "st1h z18.h, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z14.h, z14.h, z4.h\n"
+                            "ld1h z18.h, p0/z, [x8, #5, MUL VL]\n"
+                            "fmax z19.h, p2/m, z19.h, z1.h\n"
+                            "fmax z20.h, p0/m, z20.h, z1.h\n"
+                            "fmax z13.h, p1/m, z13.h, z1.h\n"
+                            "fmin z14.h, p2/m, z14.h, z0.h\n"
+                            "st1h z19.h, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fadd z15.h, z15.h, z2.h\n"
+                            "ld1h z19.h, p1/z, [x8, #6, MUL VL]\n"
+                            "fadd z16.h, z16.h, z3.h\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "fmax z14.h, p2/m, z14.h, z1.h\n"
+                            "st1h z20.h, p0, [%[outptr5]]\n"
+                            "fmin z15.h, p0/m, z15.h, z0.h\n"
+                            "ld1h z20.h, p2/z, [x8, #7, MUL VL]\n"
+                            "fmin z16.h, p1/m, z16.h, z0.h\n"
+                            "fadd z17.h, z17.h, z4.h\n"
+                            "st1h z13.h, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fadd z18.h, z18.h, z2.h\n"
+                            "fmax z15.h, p0/m, z15.h, z1.h\n"
+                            "fmax z16.h, p1/m, z16.h, z1.h\n"
+                            "st1h z14.h, p2, [%[outptr5], #2, MUL VL]\n"
+                            "fmin z17.h, p2/m, z17.h, z0.h\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "fmin z18.h, p0/m, z18.h, z0.h\n"
+                            "st1h z15.h, p0, [%[outptr6]]\n"
+                            "fadd z19.h, z19.h, z3.h\n"
+                            "fmax z17.h, p2/m, z17.h, z1.h\n"
+                            "fadd z20.h, z20.h, z4.h\n"
+                            "st1h z16.h, p1, [%[outptr6], #1, MUL VL]\n"
+                            "fmax z18.h, p0/m, z18.h, z1.h\n"
+                            "fmin z19.h, p1/m, z19.h, z0.h\n"
+                            "fmin z20.h, p2/m, z20.h, z0.h\n"
+                            "st1h z17.h, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "fmax z19.h, p1/m, z19.h, z1.h\n"
+                            "fmax z20.h, p2/m, z20.h, z1.h\n"
+                            "st1h z18.h, p0, [%[outptr7]]\n"
+                            "st1h z19.h, p1, [%[outptr7], #1, MUL VL]\n"
+                            "st1h z20.h, p2, [%[outptr7], #2, MUL VL]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
+                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+                          [inptr] "+r" (inptr), [p] "+r" (p)
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
+                        );
+                    }
+                    break;
+
+
+                }
+            }
+        }
+    }
+}
+
+#endif // __ARM_FEATURE_SVE
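
Editor's note (not part of the patch): each of the "case N" assembly blocks above follows the same fused pattern for the bias path: load up to three vectors of per-column bias once, load each output row of the result block under whilelt predicates, add the bias, clamp against the broadcast activation bounds in z1/z0 (fmax with minval, fmin with maxval), and store through the per-row output pointers. The scalar sketch below is only an illustrative reference under that reading; the function name, the flat row_stride parameter, and the simplified block layout are assumptions made for clarity, not code from this commit.

    // Hypothetical scalar reference of the bias + clamp merge performed by the
    // SVE blocks above. 'in' is assumed to hold 'rows' consecutive rows of the
    // packed result block, each 'row_stride' elements apart (the asm uses three
    // vectors per row); 'bias' is indexed by column only, matching the single
    // bias load reused for every row.
    #include <algorithm>

    template <typename T>
    static void merge_bias_act_ref(T *const *outptrs, const T *in, const T *bias,
                                   int rows, int width, int row_stride,
                                   T minval, T maxval)
    {
        for (int r = 0; r < rows; r++) {
            for (int x = 0; x < width; x++) {
                T v = in[r * row_stride + x] + bias[x];      // fadd z13.h, z13.h, z2.h ...
                v = std::max(v, minval);                     // fmax z13.h, p0/m, z13.h, z1.h
                v = std::min(v, maxval);                     // fmin z13.h, p0/m, z13.h, z0.h
                outptrs[r][x] = v;                           // st1h z13.h, p0, [outptrN]
            }
        }
    }

The predicated loads/stores in the assembly correspond to the 'width' bound here: columns beyond xmax are masked off rather than written, which is why the blocks compute whilelt predicates from w = xmax - i instead of looping to a fixed width.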
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp
deleted file mode 100644
index 7479c8d..0000000
--- a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp
+++ /dev/null
@@ -1,1208 +0,0 @@
-/*
- * Copyright (c) 2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __ARM_FEATURE_SVE
-
-template<>
-inline void MergeResults<2, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
-{
-    const float *inptr = in;
-
-    for (int y=y0; y<ymax; y+=8) {
-        float *outptr0 = out + (y * ldout) + x0;
-        float *outptr1 = outptr0 + ldout;
-        float *outptr2 = outptr1 + ldout;
-        float *outptr3 = outptr2 + ldout;
-        float *outptr4 = outptr3 + ldout;
-        float *outptr5 = outptr4 + ldout;
-        float *outptr6 = outptr5 + ldout;
-        float *outptr7 = outptr6 + ldout;
-
-        const int height = ymax - y;
-
-        for (int i=x0; i<xmax; i+=(2 * get_vector_length<float>())) {
-            if (beta==0.0f)
-            {
-                switch(height) {
-                case 1:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 2:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 3:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 4:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 5:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 6:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
-                            "addvl %[outptr5], %[outptr5], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 7:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
-                            "addvl %[outptr5], %[outptr5], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x40]\n"
-                            "addvl %[outptr6], %[outptr6], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                default:
-                case 8:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "ld1w z7.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
-                            "addvl %[outptr5], %[outptr5], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x40]\n"
-                            "addvl %[outptr6], %[outptr6], #2\n"
-                            "prfm PSTL1KEEP, [%[outptr7], #0x40]\n"
-                            "addvl %[outptr7], %[outptr7], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                
-                }
-            }
-            else
-            {
-                switch(height) {
-                case 1:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 2:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 3:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 4:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 5:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 6:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
-                            "addvl %[outptr5], %[outptr5], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                case 7:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6]]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
-                            "addvl %[outptr5], %[outptr5], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x40]\n"
-                            "addvl %[outptr6], %[outptr6], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                default:
-                case 8:
-                    {
-                        long w = xmax - i;
-                        long p = 0;
-                        /* Optimized routine to copy an entire block */
-                        __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
-                            "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
-                            "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6]]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr7]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #2\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #2\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #2\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr7], #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #2\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
-                            "addvl %[outptr4], %[outptr4], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
-                            "addvl %[outptr5], %[outptr5], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x40]\n"
-                            "addvl %[outptr6], %[outptr6], #2\n"
-                            "prfm PLDL1KEEP, [%[outptr7], #0x40]\n"
-                            "addvl %[outptr7], %[outptr7], #2\n"
-                            "1:\n"
-                            "addvl %[inptr], %[inptr], #16\n"
-                        : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
-                          [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
-                        );
-                    }
-                    break;
-                
-                
-                }
-            }
-        }
-    }
-}
-
-#endif // __ARM_FEATURE_SVE
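
Before the next file: the rewritten SVE merges that follow all share the same per-element pattern, visible in the assembly as a predicated load of the existing output (or bias) row, an fadd against the freshly computed block, and an fmin/fmax clamp against the activation limits before the store. Purely as a reading aid, here is a minimal scalar sketch of that behaviour, assuming minval/maxval have already been derived from the Activation as in the switch below; the helper name merge_block_reference is illustrative only and is not part of arm_gemm.

    #include <algorithm>

    // Hypothetical reference helper (not in the library): mirrors the
    // fadd / fmin / fmax sequence used by the SVE merge kernels below.
    static inline void merge_block_reference(float *out, const float *in,
                                             const float *bias, int n,
                                             float minval, float maxval,
                                             bool append)
    {
        for (int x = 0; x < n; x++)
        {
            // Append mode accumulates onto the existing output; otherwise the
            // (possibly zeroed "nullbias") bias row is added instead.
            float v = in[x] + (append ? out[x] : bias[x]);

            // Fused activation: ReLU raises minval to 0; BoundedReLU also
            // lowers maxval to act.param1; None leaves both at +/-infinity.
            out[x] = std::min(std::max(v, minval), maxval);
        }
    }
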
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
index 4b51066..f7feec4 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
@@ -26,11 +26,33 @@
 #ifdef __ARM_FEATURE_SVE
 
 template<>
-inline void MergeResults<3, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
+void MergeResults<3, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float *bias, Activation act, bool append)
 {
     const float *inptr = in;
+    float nullbias[192] = { 0 };
+    float minval = - std::numeric_limits<float>::infinity();
+    float maxval =   std::numeric_limits<float>::infinity();
 
-    for (int y=y0; y<ymax; y+=8) {
+    switch(act.type)
+    {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            minval = 0.0f;
+            break;
+    }
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (3 * get_vector_length<float>() * sizeof(float)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
         float *outptr0 = out + (y * ldout) + x0;
         float *outptr1 = outptr0 + ldout;
         float *outptr2 = outptr1 + ldout;
@@ -42,45 +64,52 @@
 
         const int height = ymax - y;
 
-        for (int i=x0; i<xmax; i+=(3 * get_vector_length<float>())) {
-            if (beta==0.0f)
+        for (int i=x0; i<xmax; i+=(3 * get_vector_length<float>()))
+        {
+            if (append)
             {
-                switch(height) {
+                switch(height)
+                {
                 case 1:
                     {
                         long w = xmax - i;
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "1:\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -91,47 +120,61 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "1:\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -142,59 +185,82 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "1:\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z10.s, p2/m, z10.s, z0.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "fmax z10.s, p2/m, z10.s, z1.s\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -205,70 +271,102 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "1:\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.s, p2/m, z10.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z10.s, p2/m, z10.s, z1.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z11.s, p0/m, z11.s, z0.s\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z12.s, p1/m, z12.s, z0.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmax z11.s, p0/m, z11.s, z1.s\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "fmax z12.s, p1/m, z12.s, z1.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -279,82 +377,123 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.s, p2/m, z10.s, z0.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.s, p2/m, z10.s, z1.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.s, p0/m, z11.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.s, p1/m, z12.s, z0.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z11.s, p0/m, z11.s, z1.s\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z12.s, p1/m, z12.s, z1.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
                             "addvl %[outptr2], %[outptr2], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
                             "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "1:\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -365,94 +504,144 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.s, p2/m, z10.s, z0.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.s, p2/m, z10.s, z1.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.s, p0/m, z11.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.s, p1/m, z12.s, z0.s\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z11.s, p0/m, z11.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z12.s, p1/m, z12.s, z1.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
                             "addvl %[outptr2], %[outptr2], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z10.s, p1/m, z10.s, z0.s\n"
                             "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmin z11.s, p2/m, z11.s, z0.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmax z10.s, p1/m, z10.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z11.s, p2/m, z11.s, z1.s\n"
                             "addvl %[outptr4], %[outptr4], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "1:\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
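+                    /*
+                     * For reference, the append path above is the vectorised form of the
+                     * following per-row scalar loop (illustrative sketch only, not part of
+                     * the generated kernel): the existing output is re-read, the fresh
+                     * accumulator block is added on top, and the result is clamped to the
+                     * activation range before being stored back.
+                     *
+                     *   for (long xi = 0; xi < w; xi++) {
+                     *       float v = outptr0[xi] + inptr[xi];
+                     *       outptr0[xi] = std::min(std::max(v, minval), maxval);
+                     *   }
+                     *
+                     * Because the destination is now read as well as written, the output
+                     * prefetches here use PLDL1KEEP rather than the PSTL1KEEP hints of the
+                     * old alpha/beta code.
+                     */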
@@ -463,106 +652,165 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #3, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #4, MUL VL]\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.s, p2/m, z10.s, z0.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.s, p2/m, z10.s, z1.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.s, p0/m, z11.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.s, p1/m, z12.s, z0.s\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z11.s, p0/m, z11.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z12.s, p1/m, z12.s, z1.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p0/z, [%[outptr6]]\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p0/z, [x8, #2, MUL VL]\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p1/z, [x8, #3, MUL VL]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
+                            "ld1w z6.s, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "fmin z10.s, p1/m, z10.s, z0.s\n"
+                            "ld1w z14.s, p2/z, [x8, #4, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z10.s, p1/m, z10.s, z1.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z11.s, p2/m, z11.s, z0.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "fmax z11.s, p2/m, z11.s, z1.s\n"
                             "addvl %[outptr2], %[outptr2], #3\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z12.s, p0/m, z12.s, z0.s\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fmin z13.s, p1/m, z13.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z12.s, p0/m, z12.s, z1.s\n"
                             "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z13.s, p1/m, z13.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z14.s, p2/m, z14.s, z0.s\n"
+                            "st1w z12.s, p0, [%[outptr6]]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
                             "addvl %[outptr4], %[outptr4], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fmax z14.s, p2/m, z14.s, z1.s\n"
+                            "st1w z13.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
                             "addvl %[outptr5], %[outptr5], #3\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "st1w z14.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
                             "addvl %[outptr6], %[outptr6], #3\n"
-                            "1:\n"
                             "addvl %[inptr], %[inptr], #24\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -574,117 +822,185 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "ld1w z7.s, p0/z, [x8, #5, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #3, MUL VL]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #6, MUL VL]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "fmin z10.s, p0/m, z10.s, z0.s\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z8.s, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "fmul z9.s, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [x8, #4, MUL VL]\n"
+                            "fmax z10.s, p0/m, z10.s, z1.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z11.s, p1/m, z11.s, z0.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fmax z11.s, p1/m, z11.s, z1.s\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "fmin z12.s, p2/m, z12.s, z0.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fmax z12.s, p2/m, z12.s, z1.s\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmin z10.s, p2/m, z10.s, z0.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z10.s, p2/m, z10.s, z1.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "fmin z11.s, p0/m, z11.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z12.s, p1/m, z12.s, z0.s\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z11.s, p0/m, z11.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z12.s, p1/m, z12.s, z1.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "ld1w z4.s, p0/z, [%[outptr6]]\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "ld1w z12.s, p0/z, [x8, #2, MUL VL]\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "ld1w z5.s, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "fadd z10.s, z10.s, z2.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "ld1w z13.s, p1/z, [x8, #3, MUL VL]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
+                            "ld1w z6.s, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "fmin z10.s, p1/m, z10.s, z0.s\n"
+                            "ld1w z14.s, p2/z, [x8, #4, MUL VL]\n"
+                            "fadd z11.s, z11.s, z3.s\n"
+                            "ld1w z7.s, p0/z, [%[outptr7]]\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z12.s, z12.s, z4.s\n"
+                            "ld1w z15.s, p0/z, [x8, #5, MUL VL]\n"
+                            "fmax z10.s, p1/m, z10.s, z1.s\n"
+                            "ld1w z8.s, p1/z, [%[outptr7], #1, MUL VL]\n"
+                            "fmin z11.s, p2/m, z11.s, z0.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fadd z13.s, z13.s, z5.s\n"
+                            "ld1w z16.s, p1/z, [x8, #6, MUL VL]\n"
+                            "fmin z12.s, p0/m, z12.s, z0.s\n"
+                            "ld1w z9.s, p2/z, [%[outptr7], #2, MUL VL]\n"
+                            "fadd z14.s, z14.s, z6.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "fmax z11.s, p2/m, z11.s, z1.s\n"
+                            "ld1w z17.s, p2/z, [x8, #7, MUL VL]\n"
+                            "fmin z13.s, p1/m, z13.s, z0.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z12.s, p0/m, z12.s, z1.s\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fmin z14.s, p2/m, z14.s, z0.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z13.s, p1/m, z13.s, z1.s\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "fadd z15.s, z15.s, z7.s\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z14.s, p2/m, z14.s, z1.s\n"
+                            "st1w z12.s, p0, [%[outptr6]]\n"
+                            "fadd z16.s, z16.s, z8.s\n"
                             "addvl %[outptr2], %[outptr2], #3\n"
-                            "fmul z10.s, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [x8, #7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "fmul z11.s, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
+                            "fmin z15.s, p0/m, z15.s, z0.s\n"
+                            "st1w z13.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "fadd z17.s, z17.s, z9.s\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmin z16.s, p1/m, z16.s, z0.s\n"
+                            "st1w z14.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "fmax z15.s, p0/m, z15.s, z1.s\n"
                             "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmin z17.s, p2/m, z17.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmax z16.s, p1/m, z16.s, z1.s\n"
+                            "st1w z15.s, p0, [%[outptr7]]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z17.s, p2/m, z17.s, z1.s\n"
                             "addvl %[outptr4], %[outptr4], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z16.s, p1, [%[outptr7], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
                             "addvl %[outptr5], %[outptr5], #3\n"
+                            "st1w z17.s, p2, [%[outptr7], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
                             "addvl %[outptr6], %[outptr6], #3\n"
-                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
                             "addvl %[outptr7], %[outptr7], #3\n"
-                            "1:\n"
                             "addvl %[inptr], %[inptr], #24\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
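+                    /*
+                     * Note on ragged widths: the three whilelt predicates p0/p1/p2 cover
+                     * the three vector-length column chunks handled per row, so a short
+                     * final block simply runs with partially (or fully) inactive lanes.
+                     * This replaces the old "b.none 1f" early exits, which is why the
+                     * trailing "1:" label is gone from these blocks.
+                     */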
@@ -694,48 +1010,54 @@
             }
             else
             {
-                switch(height) {
+                const float *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
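+                /* If no bias tensor was supplied, biasptr stays pointing at the
+                 * zero-initialised nullbias buffer, so the kernels below can add a
+                 * per-column bias unconditionally. */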
+
+                switch(height)
+                {
                 case 1:
                     {
                         long w = xmax - i;
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "1:\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
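+                    /*
+                     * Reference form of the non-append (bias) path above, shown as a
+                     * scalar sketch for clarity only (not part of the generated kernel):
+                     * each output element is the accumulator plus the per-column bias,
+                     * clamped to the activation range, with no read of the old output.
+                     *
+                     *   for (long xi = 0; xi < w; xi++) {
+                     *       float v = inptr[xi] + biasptr[xi];
+                     *       outptr0[xi] = std::min(std::max(v, minval), maxval);
+                     *   }
+                     */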
@@ -746,59 +1068,58 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "1:\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -809,77 +1130,76 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "1:\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.s, z19.s, z2.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "fmin z19.s, p0/m, z19.s, z0.s\n"
+                            "fadd z20.s, z20.s, z3.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fadd z13.s, z13.s, z4.s\n"
+                            "fmax z19.s, p0/m, z19.s, z1.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fmin z20.s, p1/m, z20.s, z0.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "fmax z20.s, p1/m, z20.s, z1.s\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
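For reference, every rewritten SVE block in this file performs the same per-element work: the accumulator row is loaded from inptr, the per-column bias (z2/z3/z4, loaded from biasptr) is added, the result is clamped to [minval, maxval] (z1/z0), and the value is stored through the outptr registers. The destination is only written, never read back, so the old alpha/beta read-modify-write and its per-chunk "b.none 1f" early exits are replaced by three whilelt predicates (p0/p1/p2) that mask the ragged tail without branching. A minimal scalar sketch of that per-row computation is shown below; the function and parameter names are illustrative only and are not part of this patch. The same pattern simply repeats in the wider row-count variants in the hunks that follow.

    #include <algorithm>

    // Scalar sketch of one row of the fused bias + activation merge
    // (assumption: 'width' is the number of valid columns in this row).
    static inline void merge_row_bias_act_sketch(float *out, const float *in,
                                                 const float *bias, int width,
                                                 float minval, float maxval)
    {
        for (int x = 0; x < width; x++) {
            float v = in[x] + bias[x];     // fused bias addition (fadd)
            v = std::min(v, maxval);       // upper clamp (fmin with maxval)
            v = std::max(v, minval);       // lower clamp (fmax with minval)
            out[x] = v;                    // plain store; 'out' is never read
        }
    }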
@@ -890,94 +1210,93 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "1:\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.s, z19.s, z2.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fadd z20.s, z20.s, z3.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fmin z19.s, p0/m, z19.s, z0.s\n"
+                            "fmin z20.s, p1/m, z20.s, z0.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fadd z13.s, z13.s, z4.s\n"
+                            "fadd z14.s, z14.s, z2.s\n"
+                            "fmax z19.s, p0/m, z19.s, z1.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fmax z20.s, p1/m, z20.s, z1.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "fadd z15.s, z15.s, z3.s\n"
+                            "fadd z16.s, z16.s, z4.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -988,112 +1307,111 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "1:\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.s, z19.s, z2.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmin z19.s, p0/m, z19.s, z0.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.s, z20.s, z3.s\n"
+                            "fadd z13.s, z13.s, z4.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmax z19.s, p0/m, z19.s, z1.s\n"
+                            "fmin z20.s, p1/m, z20.s, z0.s\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.s, z14.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fadd z15.s, z15.s, z3.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmax z20.s, p1/m, z20.s, z1.s\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fadd z16.s, z16.s, z4.s\n"
+                            "fadd z17.s, z17.s, z2.s\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
+                            "fadd z18.s, z18.s, z3.s\n"
+                            "fadd z19.s, z19.s, z4.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "fmin z18.s, p1/m, z18.s, z0.s\n"
+                            "fmin z19.s, p2/m, z19.s, z0.s\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z18.s, p1/m, z18.s, z1.s\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "fmax z19.s, p2/m, z19.s, z1.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -1104,130 +1422,129 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "1:\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.s, z19.s, z2.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmin z19.s, p0/m, z19.s, z0.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.s, z20.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z19.s, p0/m, z19.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmin z20.s, p1/m, z20.s, z0.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fadd z13.s, z13.s, z4.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.s, z14.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z20.s, p1/m, z20.s, z1.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fadd z15.s, z15.s, z3.s\n"
+                            "fadd z16.s, z16.s, z4.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "fadd z18.s, z18.s, z3.s\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
+                            "fmin z18.s, p1/m, z18.s, z0.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fadd z19.s, z19.s, z4.s\n"
+                            "fadd z20.s, z20.s, z2.s\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fmax z18.s, p1/m, z18.s, z1.s\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmin z19.s, p2/m, z19.s, z0.s\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "fmin z20.s, p0/m, z20.s, z0.s\n"
+                            "fadd z13.s, z13.s, z3.s\n"
+                            "fadd z14.s, z14.s, z4.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fmax z19.s, p2/m, z19.s, z1.s\n"
+                            "fmax z20.s, p0/m, z20.s, z1.s\n"
+                            "fmin z13.s, p1/m, z13.s, z0.s\n"
+                            "fmin z14.s, p2/m, z14.s, z0.s\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "fmax z13.s, p1/m, z13.s, z1.s\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "fmax z14.s, p2/m, z14.s, z1.s\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -1238,148 +1555,147 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6]]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #1, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.s, z19.s, z2.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmin z19.s, p0/m, z19.s, z0.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.s, z20.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z19.s, p0/m, z19.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmin z20.s, p1/m, z20.s, z0.s\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "1:\n"
+                            "fadd z13.s, z13.s, z4.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.s, z14.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z20.s, p1/m, z20.s, z1.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fadd z15.s, z15.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fadd z16.s, z16.s, z4.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "fadd z18.s, z18.s, z3.s\n"
+                            "fadd z19.s, z19.s, z4.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "ld1w z15.s, p0/z, [x8, #2, MUL VL]\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "fmin z18.s, p1/m, z18.s, z0.s\n"
+                            "fmin z19.s, p2/m, z19.s, z0.s\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z20.s, z20.s, z2.s\n"
+                            "ld1w z16.s, p1/z, [x8, #3, MUL VL]\n"
+                            "fadd z13.s, z13.s, z3.s\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z18.s, p1/m, z18.s, z1.s\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "fmax z19.s, p2/m, z19.s, z1.s\n"
+                            "ld1w z17.s, p2/z, [x8, #4, MUL VL]\n"
+                            "fmin z20.s, p0/m, z20.s, z0.s\n"
+                            "fmin z13.s, p1/m, z13.s, z0.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z14.s, z14.s, z4.s\n"
+                            "fadd z15.s, z15.s, z2.s\n"
+                            "fmax z20.s, p0/m, z20.s, z1.s\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fmax z13.s, p1/m, z13.s, z1.s\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "fmin z14.s, p2/m, z14.s, z0.s\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "fmin z15.s, p0/m, z15.s, z0.s\n"
+                            "fadd z16.s, z16.s, z3.s\n"
+                            "fadd z17.s, z17.s, z4.s\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fmax z14.s, p2/m, z14.s, z1.s\n"
+                            "fmax z15.s, p0/m, z15.s, z1.s\n"
+                            "fmin z16.s, p1/m, z16.s, z0.s\n"
+                            "fmin z17.s, p2/m, z17.s, z0.s\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "fmax z16.s, p1/m, z16.s, z1.s\n"
+                            "st1w z15.s, p0, [%[outptr6]]\n"
+                            "fmax z17.s, p2/m, z17.s, z1.s\n"
+                            "st1w z16.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
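For reference, a minimal SVE-intrinsics sketch of the per-row operation that the fused FP32 merge blocks above and below implement: add the per-column bias to the partial GEMM result, clamp to [minval, maxval], store. The function name merge_row_bias_act and the acc/width parameter names are illustrative only and are not part of this patch; the real kernels unroll this over up to eight rows and three vectors per strip.

    #include <arm_sve.h>

    // Illustrative sketch only: one output row of the fused bias + activation merge.
    // minval/maxval carry the activation bounds, exactly as passed to the assembly.
    static inline void merge_row_bias_act(float *out, const float *acc, const float *bias,
                                          long width, float minval, float maxval)
    {
        for (long i = 0; i < width; i += svcntw())
        {
            svbool_t    pg = svwhilelt_b32_s64(i, width);    // predicate covers the ragged tail
            svfloat32_t v  = svld1_f32(pg, acc + i);         // partial GEMM result for this row
            v = svadd_f32_x(pg, v, svld1_f32(pg, bias + i)); // fused per-column bias add
            v = svmax_n_f32_x(pg, v, minval);                // lower clamp (ReLU / BoundedReLU)
            v = svmin_n_f32_x(pg, v, maxval);                // upper clamp (BoundedReLU)
            svst1_f32(pg, out + i, v);
        }
    }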
@@ -1391,165 +1707,164 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
+                            "mov z0.s, %s[maxval]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
+                            "mov z1.s, %s[minval]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0]]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr]]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4]]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6]]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "ld1w z11.s, p0/z, [%[outptr7]]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #3, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr7], #1, MUL VL]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #6, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "fadd z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "fmul z8.s, z8.s, z3.s\n"
-                            "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "fmla z8.s, p0/m, z4.s, z2.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "fmul z9.s, z9.s, z3.s\n"
-                            "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
-                            "fmla z9.s, p0/m, z5.s, z2.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "fmul z10.s, z10.s, z3.s\n"
-                            "ld1w z6.s, p0/z, [x8, #4, MUL VL]\n"
-                            "fmla z10.s, p0/m, z6.s, z2.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[outptr7], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "fmul z11.s, z11.s, z3.s\n"
-                            "ld1w z7.s, p0/z, [x8, #7, MUL VL]\n"
-                            "fmla z11.s, p0/m, z7.s, z2.s\n"
-                            "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "fadd z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "fmin z13.s, p0/m, z13.s, z0.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "fadd z15.s, z15.s, z4.s\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z2.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "fmin z14.s, p1/m, z14.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "fmax z13.s, p0/m, z13.s, z1.s\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "fmax z14.s, p1/m, z14.s, z1.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "fmin z15.s, p2/m, z15.s, z0.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "fmin z16.s, p0/m, z16.s, z0.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "fadd z17.s, z17.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "fmax z15.s, p2/m, z15.s, z1.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "fmax z16.s, p0/m, z16.s, z1.s\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "fmin z17.s, p1/m, z17.s, z0.s\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "fadd z18.s, z18.s, z4.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "fadd z19.s, z19.s, z2.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "fmax z17.s, p1/m, z17.s, z1.s\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "fmin z18.s, p2/m, z18.s, z0.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "fmin z19.s, p0/m, z19.s, z0.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "fadd z20.s, z20.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "fmax z18.s, p2/m, z18.s, z1.s\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "fmax z19.s, p0/m, z19.s, z1.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "fmin z20.s, p1/m, z20.s, z0.s\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
-                            "addvl %[outptr7], %[outptr7], #3\n"
-                            "1:\n"
+                            "fadd z13.s, z13.s, z4.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "fadd z14.s, z14.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "fmax z20.s, p1/m, z20.s, z1.s\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "fmin z13.s, p2/m, z13.s, z0.s\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "fmin z14.s, p0/m, z14.s, z0.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "fadd z15.s, z15.s, z3.s\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "fmax z13.s, p2/m, z13.s, z1.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "fmax z14.s, p0/m, z14.s, z1.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "fmin z15.s, p1/m, z15.s, z0.s\n"
+                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                            "fadd z16.s, z16.s, z4.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "fadd z17.s, z17.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "fmax z15.s, p1/m, z15.s, z1.s\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "fmin z16.s, p2/m, z16.s, z0.s\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "fmin z17.s, p0/m, z17.s, z0.s\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "fadd z18.s, z18.s, z3.s\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "fmax z16.s, p2/m, z16.s, z1.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "fmax z17.s, p0/m, z17.s, z1.s\n"
+                            "ld1w z15.s, p0/z, [x8, #2, MUL VL]\n"
+                            "fmin z18.s, p1/m, z18.s, z0.s\n"
+                            "fadd z19.s, z19.s, z4.s\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "fadd z20.s, z20.s, z2.s\n"
+                            "ld1w z16.s, p1/z, [x8, #3, MUL VL]\n"
+                            "fadd z13.s, z13.s, z3.s\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "fmax z18.s, p1/m, z18.s, z1.s\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "fmin z19.s, p2/m, z19.s, z0.s\n"
+                            "ld1w z17.s, p2/z, [x8, #4, MUL VL]\n"
+                            "fmin z20.s, p0/m, z20.s, z0.s\n"
+                            "fmin z13.s, p1/m, z13.s, z0.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "fadd z14.s, z14.s, z4.s\n"
+                            "ld1w z18.s, p0/z, [x8, #5, MUL VL]\n"
+                            "fmax z19.s, p2/m, z19.s, z1.s\n"
+                            "fmax z20.s, p0/m, z20.s, z1.s\n"
+                            "fmax z13.s, p1/m, z13.s, z1.s\n"
+                            "fmin z14.s, p2/m, z14.s, z0.s\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "fadd z15.s, z15.s, z2.s\n"
+                            "ld1w z19.s, p1/z, [x8, #6, MUL VL]\n"
+                            "fadd z16.s, z16.s, z3.s\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "fmax z14.s, p2/m, z14.s, z1.s\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "fmin z15.s, p0/m, z15.s, z0.s\n"
+                            "ld1w z20.s, p2/z, [x8, #7, MUL VL]\n"
+                            "fmin z16.s, p1/m, z16.s, z0.s\n"
+                            "fadd z17.s, z17.s, z4.s\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "fadd z18.s, z18.s, z2.s\n"
+                            "fmax z15.s, p0/m, z15.s, z1.s\n"
+                            "fmax z16.s, p1/m, z16.s, z1.s\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "fmin z17.s, p2/m, z17.s, z0.s\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "fmin z18.s, p0/m, z18.s, z0.s\n"
+                            "st1w z15.s, p0, [%[outptr6]]\n"
+                            "fadd z19.s, z19.s, z3.s\n"
+                            "fmax z17.s, p2/m, z17.s, z1.s\n"
+                            "fadd z20.s, z20.s, z4.s\n"
+                            "st1w z16.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "fmax z18.s, p0/m, z18.s, z1.s\n"
+                            "fmin z19.s, p1/m, z19.s, z0.s\n"
+                            "fmin z20.s, p2/m, z20.s, z0.s\n"
+                            "st1w z17.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "fmax z19.s, p1/m, z19.s, z1.s\n"
+                            "fmax z20.s, p2/m, z20.s, z1.s\n"
+                            "st1w z18.s, p0, [%[outptr7]]\n"
+                            "st1w z19.s, p1, [%[outptr7], #1, MUL VL]\n"
+                            "st1w z20.s, p2, [%[outptr7], #2, MUL VL]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr), [minval] "w" (minval), [maxval] "w" (maxval)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
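Worth noting about the FP32 blocks above: the activation bounds stay resident in z0 (maxval) and z1 (minval) for the whole block, and the three bias vectors are loaded into z2-z4 once per column strip and re-used by every row's fadd, which is why the clobber lists now extend to z20. Numerically, with a bias of 1.0f and a BoundedReLU bound of 6.0f (so minval = 0, maxval = 6), an accumulator value of -3.5f merges to min(max(-3.5f + 1.0f, 0.0f), 6.0f) = 0.0f, and 7.25f merges to 6.0f.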
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_s32_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_s32_3VLx8.hpp
index 2ea38a7..03a2824 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_s32_3VLx8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_s32_3VLx8.hpp
@@ -26,11 +26,20 @@
 #ifdef __ARM_FEATURE_SVE
 
 template<>
-inline void MergeResults<3, 8, true>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t alpha, const int32_t beta)
+void MergeResults<3, 8, true>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t *bias, Activation act, bool append)
 {
-    const int32_t *inptr = in;
+    UNUSED(act);
 
-    for (int y=y0; y<ymax; y+=8) {
+    const int32_t *inptr = in;
+    int32_t nullbias[192];
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (3 * get_vector_length<int32_t>() * sizeof(int32_t)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
         int32_t *outptr0 = out + (y * ldout) + x0;
         int32_t *outptr1 = outptr0 + ldout;
         int32_t *outptr2 = outptr1 + ldout;
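A plain-C++ view of what the int32 merge computes may help when reading the unrolled blocks below. This is an illustrative reference only, not part of the patch: the parameter names are hypothetical, the packed accumulator layout is simplified to row-major, and the Activation argument is deliberately absent because the int32 kernel ignores it (see UNUSED(act) above).

    #include <cstdint>

    // Hypothetical reference loop; the real kernel walks the block in predicated
    // 3-vector column strips rather than scalar loops.
    static void merge_block_ref(int32_t *out, const int32_t *in, int ldout,
                                int height, int width, const int32_t *bias, bool append)
    {
        for (int r = 0; r < height; r++) {
            for (int c = 0; c < width; c++) {
                const int32_t acc = in[r * width + c];
                if (append) {
                    out[r * ldout + c] += acc;                       // accumulate onto existing output
                } else {
                    out[r * ldout + c] = acc + (bias ? bias[c] : 0); // bias == nullptr behaves like nullbias
                }
            }
        }
    }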
@@ -42,42 +51,44 @@
 
         const int height = ymax - y;
 
-        for (int i=x0; i<xmax; i+=(3 * get_vector_length<int32_t>())) {
-            if (beta==0)
+        for (int i=x0; i<xmax; i+=(3 * get_vector_length<int32_t>()))
+        {
+            if (append)
             {
-                switch(height) {
+                switch(height)
+                {
                 case 1:
                     {
                         long w = xmax - i;
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "1:\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
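As a usage sketch only, assuming the arm_gemm headers that declare MergeResults and Activation are in scope, that Activation default-constructs to Type::None, and hypothetical C/acc/ldc/bias_q variables, the two modes of the int32 merge could be exercised like this:

    // Hypothetical call-site sketch; strides and buffer sizes are made up for illustration.
    void example_merge(int32_t *C, const int32_t *acc, int ldc, int M, int N, const int32_t *bias_q)
    {
        Activation no_act;  // assumed default: Type::None (ignored by the int32 merge anyway)
        // Write a fresh block: C = acc + bias.
        MergeResults<3, 8, true>(C, acc, ldc, 0, M, 0, N, bias_q, no_act, false);
        // Accumulate a further partial result on top of what is already in C.
        MergeResults<3, 8, true>(C, acc, ldc, 0, M, 0, N, nullptr, no_act, true);
    }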
@@ -88,41 +99,47 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "1:\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -133,50 +150,62 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -187,58 +216,76 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -249,67 +296,91 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -320,76 +391,106 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8]\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -400,85 +501,121 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #4, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "1:\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[outptr6]]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "ld1w z12.s, p0/z, [x8, #2, MUL VL]\n"
+                            "ld1w z5.s, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "ld1w z13.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "ld1w z14.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p0, [%[outptr6]]\n"
+                            "st1w z13.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -490,93 +627,135 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z11.s, p0/z, [x8, #5, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #4, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
-                            "addvl %[outptr7], %[outptr7], #3\n"
-                            "1:\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[outptr6]]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "ld1w z12.s, p0/z, [x8, #2, MUL VL]\n"
+                            "ld1w z5.s, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "ld1w z13.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "ld1w z14.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z7.s, p0/z, [%[outptr7]]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p0, [%[outptr6]]\n"
+                            "ld1w z15.s, p0/z, [x8, #5, MUL VL]\n"
+                            "ld1w z8.s, p1/z, [%[outptr7], #1, MUL VL]\n"
+                            "ld1w z16.s, p1/z, [x8, #6, MUL VL]\n"
+                            "st1w z13.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "ld1w z9.s, p2/z, [%[outptr7], #2, MUL VL]\n"
+                            "ld1w z17.s, p2/z, [x8, #7, MUL VL]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "st1w z15.s, p0, [%[outptr7]]\n"
+                            "st1w z16.s, p1, [%[outptr7], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr7], #2, MUL VL]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -586,45 +765,46 @@
             }
             else
             {
-                switch(height) {
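+                /* Bias mode (a sketch of the intent, inferred from this hunk):
+                 * instead of appending to the existing output, each accumulator
+                 * block has a bias vector added before being stored. When no
+                 * bias is supplied, point at the zero-initialised nullbias
+                 * buffer so the same code path applies. */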
+                const int32_t *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
+                switch(height)
+                {
                 case 1:
                     {
                         long w = xmax - i;
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "ld1w z5.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "1:\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -635,53 +815,44 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "1:\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -692,68 +863,56 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z7.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "1:\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -764,82 +923,67 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "1:\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
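
The output prefetches in these blocks also switch from read hints (PLDL1KEEP) to store hints (PSTL1KEEP), which matches the destination now being written without first being read. A rough compiler-builtin analogue of that hint, shown only for illustration (GCC/Clang __builtin_prefetch):

    // __builtin_prefetch(addr, rw, locality): rw = 1 requests a prefetch for
    // writing, roughly corresponding to the PSTL1KEEP hints used above.
    static inline void prefetch_output_sketch(const void *outptr)
    {
        __builtin_prefetch(outptr, 1, 3);
    }
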
@@ -850,97 +994,79 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z5.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z8.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z6.s, p0/z, [%[outptr4], #2, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "1:\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
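
The whilelt/incw sequence at the top of each block builds three column predicates (p0, p1, p2), one per vector of 32-bit lanes, so ragged widths are handled entirely by predication instead of a scalar tail loop. A scalar analogue of that predicate setup, again with illustrative names only:

    // Scalar analogue of "whilelt pN.s, p, w" / "incw p" as used above: lane i
    // of predicate N is active while (p + i) < w. Names are illustrative only.
    static inline void column_predicates_sketch(long w, long lanes, bool p0[], bool p1[], bool p2[])
    {
        long p = 0;
        for (long i = 0; i < lanes; i++) p0[i] = (p + i) < w;  // whilelt p0.s, p, w
        p += lanes;                                             // incw p
        for (long i = 0; i < lanes; i++) p1[i] = (p + i) < w;  // whilelt p1.s, p, w
        p += lanes;                                             // incw p
        for (long i = 0; i < lanes; i++) p2[i] = (p + i) < w;  // whilelt p2.s, p, w
    }
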
@@ -951,112 +1077,91 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z7.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8]\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
                             "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z4.s, p0/z, [%[outptr4], #2, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "1:\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z20.s, z20.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z13.s, z13.s, z3.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z4.s\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -1067,127 +1172,103 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [%[outptr6]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z7.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z4.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z8.s, p0/z, [x8]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
                             "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z6.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z7.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z4.s, p0/z, [%[outptr6], #2, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z8.s, p0/z, [x8, #4, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "1:\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z20.s, z20.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z13.s, z13.s, z3.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z4.s\n"
+                            "ld1w z15.s, p0/z, [x8, #2, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z15.s, z15.s, z2.s\n"
+                            "ld1w z16.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z16.s, z16.s, z3.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z4.s\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "st1w z15.s, p0, [%[outptr6]]\n"
+                            "st1w z16.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -1199,141 +1280,114 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [%[outptr6]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr7]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8, #5, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8]\n"
-                            "ld1w z7.s, p0/z, [%[outptr7], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8, #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
                             "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z4.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z5.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z6.s, p0/z, [%[outptr6], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #4, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "ld1w z7.s, p0/z, [%[outptr7], #2, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8, #7, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
-                            "addvl %[outptr7], %[outptr7], #3\n"
-                            "1:\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z20.s, z20.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z13.s, z13.s, z3.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z4.s\n"
+                            "ld1w z15.s, p0/z, [x8, #2, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z15.s, z15.s, z2.s\n"
+                            "ld1w z16.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z16.s, z16.s, z3.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z4.s\n"
+                            "ld1w z18.s, p0/z, [x8, #5, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z18.s, z18.s, z2.s\n"
+                            "ld1w z19.s, p1/z, [x8, #6, MUL VL]\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "ld1w z20.s, p2/z, [x8, #7, MUL VL]\n"
+                            "add z19.s, z19.s, z3.s\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "add z20.s, z20.s, z4.s\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "st1w z15.s, p0, [%[outptr6]]\n"
+                            "st1w z16.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "st1w z18.s, p0, [%[outptr7]]\n"
+                            "st1w z19.s, p1, [%[outptr7], #1, MUL VL]\n"
+                            "st1w z20.s, p2, [%[outptr7], #2, MUL VL]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
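
Before the next file's hunks, a minimal scalar sketch of what these predicated SVE merge blocks compute may help when reading the register-level diff; it is illustrative only, not part of the patch, and the helper names (merge_row_append_u32, merge_row_bias_act_f32, width) are hypothetical. The first function is the append path visible in the uint32 kernels below (load the existing output row, accumulate the partial result, store back; the integer merges leave the activation unused). The second shows the non-append path the fused bias/activation change introduces for the floating-point merges, assuming a plain contiguous row.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Append mode (the ld1w-from-outptr / add / st1w pattern in the SVE blocks below):
// accumulate the freshly computed partial result onto the existing output row.
inline void merge_row_append_u32(uint32_t *outptr, const uint32_t *inptr, size_t width)
{
    for (size_t x = 0; x < width; x++)
    {
        outptr[x] += inptr[x];
    }
}

// Non-append mode for the floating-point merges: add the per-column bias (or a
// zeroed nullbias buffer) and clamp through the fused activation window
// [minval, maxval]. Illustrative sketch, not the generated kernel.
inline void merge_row_bias_act_f32(float *outptr, const float *inptr, const float *bias,
                                   size_t width, float minval, float maxval)
{
    for (size_t x = 0; x < width; x++)
    {
        outptr[x] = std::min(std::max(minval, inptr[x] + bias[x]), maxval);
    }
}
```

In the patched assembly the ragged tail of each row is handled by the three whilelt predicates (p0, p1, p2) instead of the removed "b.none 1f" early-exit branches, so every column slice of the loop above maps onto one predicated load/add/store group.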
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_u32_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_u32_3VLx8.hpp
index eb684e2..67a6eb3 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_u32_3VLx8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_u32_3VLx8.hpp
@@ -26,11 +26,20 @@
 #ifdef __ARM_FEATURE_SVE
 
 template<>
-inline void MergeResults<3, 8, true>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t alpha, const uint32_t beta)
+void MergeResults<3, 8, true>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t *bias, Activation act, bool append)
 {
-    const uint32_t *inptr = in;
+    UNUSED(act);
 
-    for (int y=y0; y<ymax; y+=8) {
+    const uint32_t *inptr = in;
+    uint32_t nullbias[192] = { 0 };
+
+    if (!append && !bias)
+    {
+        memset(nullbias, 0, (3 * get_vector_length<uint32_t>() * sizeof(uint32_t)));
+    }
+
+    for (int y=y0; y<ymax; y+=8)
+    {
         uint32_t *outptr0 = out + (y * ldout) + x0;
         uint32_t *outptr1 = outptr0 + ldout;
         uint32_t *outptr2 = outptr1 + ldout;
@@ -42,42 +51,44 @@
 
         const int height = ymax - y;
 
-        for (int i=x0; i<xmax; i+=(3 * get_vector_length<uint32_t>())) {
-            if (beta==0u)
+        for (int i=x0; i<xmax; i+=(3 * get_vector_length<uint32_t>()))
+        {
+            if (append)
             {
-                switch(height) {
+                switch(height)
+                {
                 case 1:
                     {
                         long w = xmax - i;
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "1:\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -88,41 +99,47 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "1:\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -133,50 +150,62 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -187,58 +216,76 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -249,67 +296,91 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -320,76 +391,106 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8]\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "1:\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -400,85 +501,121 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #4, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "1:\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[outptr6]]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "ld1w z12.s, p0/z, [x8, #2, MUL VL]\n"
+                            "ld1w z5.s, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "ld1w z13.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "ld1w z14.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p0, [%[outptr6]]\n"
+                            "st1w z13.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -490,93 +627,135 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "ld1w z11.s, p0/z, [x8, #5, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[outptr0]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z10.s, p0/z, [%[inptr]]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "ld1w z3.s, p1/z, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z11.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z13.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z10.s, p0, [%[outptr0]]\n"
+                            "ld1w z4.s, p2/z, [%[outptr0], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "ld1w z12.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+                            "ld1w z6.s, p1/z, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z7.s, p2/z, [%[outptr1], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z8.s, p0/z, [%[outptr2]]\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p0, [%[outptr1]]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "ld1w z9.s, p1/z, [%[outptr2], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #7, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "ld1w z10.s, p0/z, [x8, #4, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "ld1w z11.s, p0/z, [x8, #7, MUL VL]\n"
-                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
-                            "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
-                            "addvl %[outptr7], %[outptr7], #3\n"
-                            "1:\n"
+                            "ld1w z2.s, p2/z, [%[outptr2], #2, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "st1w z15.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "ld1w z10.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "ld1w z3.s, p0/z, [%[outptr3]]\n"
+                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p0, [%[outptr2]]\n"
+                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "ld1w z4.s, p1/z, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "ld1w z12.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "ld1w z5.s, p2/z, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z13.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z10.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p0/z, [%[outptr4]]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "ld1w z7.s, p1/z, [%[outptr4], #1, MUL VL]\n"
+                            "ld1w z15.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z12.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "ld1w z8.s, p2/z, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z16.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "st1w z13.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "ld1w z9.s, p0/z, [%[outptr5]]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "ld1w z2.s, p1/z, [%[outptr5], #1, MUL VL]\n"
+                            "ld1w z10.s, p1/z, [x8]\n"
+                            "st1w z15.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "ld1w z3.s, p2/z, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z11.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z10.s, z10.s, z2.s\n"
+                            "st1w z16.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "ld1w z4.s, p0/z, [%[outptr6]]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z11.s, z11.s, z3.s\n"
+                            "st1w z17.s, p0, [%[outptr5]]\n"
+                            "ld1w z12.s, p0/z, [x8, #2, MUL VL]\n"
+                            "ld1w z5.s, p1/z, [%[outptr6], #1, MUL VL]\n"
+                            "ld1w z13.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z10.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "add z12.s, z12.s, z4.s\n"
+                            "ld1w z6.s, p2/z, [%[outptr6], #2, MUL VL]\n"
+                            "ld1w z14.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z13.s, z13.s, z5.s\n"
+                            "st1w z11.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "ld1w z7.s, p0/z, [%[outptr7]]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "add z14.s, z14.s, z6.s\n"
+                            "st1w z12.s, p0, [%[outptr6]]\n"
+                            "ld1w z15.s, p0/z, [x8, #5, MUL VL]\n"
+                            "ld1w z8.s, p1/z, [%[outptr7], #1, MUL VL]\n"
+                            "ld1w z16.s, p1/z, [x8, #6, MUL VL]\n"
+                            "st1w z13.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "add z15.s, z15.s, z7.s\n"
+                            "ld1w z9.s, p2/z, [%[outptr7], #2, MUL VL]\n"
+                            "ld1w z17.s, p2/z, [x8, #7, MUL VL]\n"
+                            "add z16.s, z16.s, z8.s\n"
+                            "st1w z14.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "add z17.s, z17.s, z9.s\n"
+                            "st1w z15.s, p0, [%[outptr7]]\n"
+                            "st1w z16.s, p1, [%[outptr7], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr7], #2, MUL VL]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -586,45 +765,46 @@
             }
             else
             {
-                switch(height) {
+                const uint32_t *biasptr = nullbias;
+                if (bias)
+                {
+                    biasptr = bias + i;
+                }
+
+                switch(height)
+                {
                 case 1:
                     {
                         long w = xmax - i;
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
-                            "ld1w z5.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "1:\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -635,53 +815,44 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "1:\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -692,68 +863,56 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z7.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "1:\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "addvl %[outptr0], %[outptr0], #3\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -764,82 +923,67 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #2, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "1:\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -850,97 +994,79 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z5.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z8.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
-                            "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z6.s, p0/z, [%[outptr4], #2, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "1:\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
+                            "addvl %[outptr1], %[outptr1], #3\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -951,112 +1077,91 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z7.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8]\n"
-                            "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
                             "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z4.s, p0/z, [%[outptr4], #2, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "1:\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z20.s, z20.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z13.s, z13.s, z3.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z4.s\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -1067,127 +1172,103 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [%[outptr6]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z7.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z4.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z8.s, p0/z, [x8]\n"
-                            "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z6.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z7.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z8.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
                             "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z6.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z7.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z4.s, p0/z, [%[outptr6], #2, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z8.s, p0/z, [x8, #4, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "1:\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z20.s, z20.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z13.s, z13.s, z3.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z4.s\n"
+                            "ld1w z15.s, p0/z, [x8, #2, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z15.s, z15.s, z2.s\n"
+                            "ld1w z16.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z16.s, z16.s, z3.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z4.s\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "st1w z15.s, p0, [%[outptr6]]\n"
+                            "st1w z16.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;
@@ -1199,141 +1280,114 @@
                         long p = 0;
                         /* Optimized routine to copy an entire block */
                         __asm __volatile (
-                            "mov z2.s, %s[alpha]\n"
                             "addvl x8, %[inptr], #16\n"
-                            "mov z3.s, %s[beta]\n"
                             "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
                             "incw %[p], all, mul #1\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0]]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
-                            "ld1w z8.s, p0/z, [%[inptr]]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1]]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #3, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #6, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-7, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0]]\n"
-                            "ld1w z8.s, p0/z, [x8, #-4, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5]]\n"
-                            "ld1w z6.s, p0/z, [%[outptr6]]\n"
-                            "st1w z9.s, p0, [%[outptr1]]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8, #-1, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr7]]\n"
-                            "st1w z10.s, p0, [%[outptr2]]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #2, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3]]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8, #5, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4]]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr5]]\n"
-                            "st1w z10.s, p0, [%[outptr6]]\n"
-                            "st1w z11.s, p0, [%[outptr7]]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
+                            "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+                            "ld1w z2.s, p0/z, [%[biasptr]]\n"
+                            "whilelt p1.s, %[p], %[w]\n"
+                            "ld1w z3.s, p0/z, [%[biasptr], #1, MUL VL]\n"
                             "incw %[p], all, mul #1\n"
+                            "ld1w z4.s, p0/z, [%[biasptr], #2, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #1, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #1, MUL VL]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #1, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #4, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z10.s, p0/z, [%[inptr], #7, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z11.s, p0/z, [x8, #-6, MUL VL]\n"
-                            "ld1w z4.s, p0/z, [%[outptr4], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-3, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "ld1w z5.s, p0/z, [%[outptr5], #1, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [x8]\n"
-                            "ld1w z7.s, p0/z, [%[outptr7], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "ld1w z10.s, p0/z, [x8, #3, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8, #6, MUL VL]\n"
-                            "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
-                            "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
-                            "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
-                            "whilelt p0.s, %[p], %[w]\n"
-                            "b.none 1f\n"
-                            "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
-                            "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
-                            "ld1w z4.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+                            "ld1w z13.s, p0/z, [%[inptr]]\n"
+                            "whilelt p2.s, %[p], %[w]\n"
+                            "ld1w z14.s, p1/z, [%[inptr], #1, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+                            "add z13.s, z13.s, z2.s\n"
+                            "ld1w z15.s, p2/z, [%[inptr], #2, MUL VL]\n"
+                            "ld1w z16.s, p0/z, [%[inptr], #3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
-                            "ld1w z8.s, p0/z, [%[inptr], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
-                            "ld1w z5.s, p0/z, [%[outptr1], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "ld1w z9.s, p0/z, [%[inptr], #5, MUL VL]\n"
-                            "ld1w z6.s, p0/z, [%[outptr2], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
-                            "ld1w z10.s, p0/z, [x8, #-8, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
-                            "ld1w z7.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+                            "add z14.s, z14.s, z3.s\n"
+                            "st1w z13.s, p0, [%[outptr0]]\n"
+                            "add z15.s, z15.s, z4.s\n"
+                            "ld1w z17.s, p1/z, [%[inptr], #4, MUL VL]\n"
+                            "add z16.s, z16.s, z2.s\n"
+                            "ld1w z18.s, p2/z, [%[inptr], #5, MUL VL]\n"
+                            "ld1w z19.s, p0/z, [%[inptr], #6, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+                            "add z17.s, z17.s, z3.s\n"
+                            "st1w z14.s, p1, [%[outptr0], #1, MUL VL]\n"
+                            "add z18.s, z18.s, z4.s\n"
+                            "ld1w z20.s, p1/z, [%[inptr], #7, MUL VL]\n"
+                            "add z19.s, z19.s, z2.s\n"
+                            "ld1w z13.s, p2/z, [x8, #-8, MUL VL]\n"
+                            "ld1w z14.s, p0/z, [x8, #-7, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+                            "add z20.s, z20.s, z3.s\n"
+                            "st1w z15.s, p2, [%[outptr0], #2, MUL VL]\n"
+                            "add z13.s, z13.s, z4.s\n"
+                            "ld1w z15.s, p1/z, [x8, #-6, MUL VL]\n"
+                            "add z14.s, z14.s, z2.s\n"
                             "addvl %[outptr0], %[outptr0], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
-                            "ld1w z11.s, p0/z, [x8, #-5, MUL VL]\n"
+                            "st1w z16.s, p0, [%[outptr1]]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+                            "add z15.s, z15.s, z3.s\n"
+                            "ld1w z16.s, p2/z, [x8, #-5, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+                            "st1w z17.s, p1, [%[outptr1], #1, MUL VL]\n"
+                            "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+                            "add z16.s, z16.s, z4.s\n"
+                            "ld1w z17.s, p0/z, [x8, #-4, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+                            "st1w z18.s, p2, [%[outptr1], #2, MUL VL]\n"
                             "addvl %[outptr1], %[outptr1], #3\n"
-                            "ld1w z4.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+                            "add z17.s, z17.s, z2.s\n"
+                            "ld1w z18.s, p1/z, [x8, #-3, MUL VL]\n"
                             "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
-                            "ld1w z8.s, p0/z, [x8, #-2, MUL VL]\n"
-                            "addvl %[outptr2], %[outptr2], #3\n"
-                            "ld1w z5.s, p0/z, [%[outptr5], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
-                            "add z8.s, z8.s, z4.s\n"
-                            "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
-                            "ld1w z9.s, p0/z, [x8, #1, MUL VL]\n"
-                            "addvl %[outptr3], %[outptr3], #3\n"
-                            "ld1w z6.s, p0/z, [%[outptr6], #2, MUL VL]\n"
-                            "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
-                            "add z9.s, z9.s, z5.s\n"
-                            "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
-                            "ld1w z10.s, p0/z, [x8, #4, MUL VL]\n"
-                            "addvl %[outptr4], %[outptr4], #3\n"
-                            "ld1w z7.s, p0/z, [%[outptr7], #2, MUL VL]\n"
-                            "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
-                            "addvl %[outptr5], %[outptr5], #3\n"
-                            "add z10.s, z10.s, z6.s\n"
-                            "ld1w z11.s, p0/z, [x8, #7, MUL VL]\n"
-                            "add z11.s, z11.s, z7.s\n"
-                            "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
-                            "addvl %[outptr6], %[outptr6], #3\n"
-                            "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
-                            "addvl %[outptr7], %[outptr7], #3\n"
-                            "1:\n"
+                            "st1w z19.s, p0, [%[outptr2]]\n"
+                            "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+                            "add z18.s, z18.s, z3.s\n"
+                            "ld1w z19.s, p2/z, [x8, #-2, MUL VL]\n"
+                            "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+                            "st1w z20.s, p1, [%[outptr2], #1, MUL VL]\n"
                             "addvl %[inptr], %[inptr], #24\n"
+                            "add z19.s, z19.s, z4.s\n"
+                            "ld1w z20.s, p0/z, [x8, #-1, MUL VL]\n"
+                            "st1w z13.s, p2, [%[outptr2], #2, MUL VL]\n"
+                            "addvl %[outptr2], %[outptr2], #3\n"
+                            "add z20.s, z20.s, z2.s\n"
+                            "ld1w z13.s, p1/z, [x8]\n"
+                            "st1w z14.s, p0, [%[outptr3]]\n"
+                            "ld1w z14.s, p2/z, [x8, #1, MUL VL]\n"
+                            "add z13.s, z13.s, z3.s\n"
+                            "st1w z15.s, p1, [%[outptr3], #1, MUL VL]\n"
+                            "add z14.s, z14.s, z4.s\n"
+                            "ld1w z15.s, p0/z, [x8, #2, MUL VL]\n"
+                            "st1w z16.s, p2, [%[outptr3], #2, MUL VL]\n"
+                            "addvl %[outptr3], %[outptr3], #3\n"
+                            "add z15.s, z15.s, z2.s\n"
+                            "ld1w z16.s, p1/z, [x8, #3, MUL VL]\n"
+                            "st1w z17.s, p0, [%[outptr4]]\n"
+                            "ld1w z17.s, p2/z, [x8, #4, MUL VL]\n"
+                            "add z16.s, z16.s, z3.s\n"
+                            "st1w z18.s, p1, [%[outptr4], #1, MUL VL]\n"
+                            "add z17.s, z17.s, z4.s\n"
+                            "ld1w z18.s, p0/z, [x8, #5, MUL VL]\n"
+                            "st1w z19.s, p2, [%[outptr4], #2, MUL VL]\n"
+                            "addvl %[outptr4], %[outptr4], #3\n"
+                            "add z18.s, z18.s, z2.s\n"
+                            "ld1w z19.s, p1/z, [x8, #6, MUL VL]\n"
+                            "st1w z20.s, p0, [%[outptr5]]\n"
+                            "ld1w z20.s, p2/z, [x8, #7, MUL VL]\n"
+                            "add z19.s, z19.s, z3.s\n"
+                            "st1w z13.s, p1, [%[outptr5], #1, MUL VL]\n"
+                            "add z20.s, z20.s, z4.s\n"
+                            "st1w z14.s, p2, [%[outptr5], #2, MUL VL]\n"
+                            "addvl %[outptr5], %[outptr5], #3\n"
+                            "st1w z15.s, p0, [%[outptr6]]\n"
+                            "st1w z16.s, p1, [%[outptr6], #1, MUL VL]\n"
+                            "st1w z17.s, p2, [%[outptr6], #2, MUL VL]\n"
+                            "addvl %[outptr6], %[outptr6], #3\n"
+                            "st1w z18.s, p0, [%[outptr7]]\n"
+                            "st1w z19.s, p1, [%[outptr7], #1, MUL VL]\n"
+                            "st1w z20.s, p2, [%[outptr7], #2, MUL VL]\n"
+                            "addvl %[outptr7], %[outptr7], #3\n"
                         : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
                           [inptr] "+r" (inptr), [p] "+r" (p)
-                        : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
-                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+                        : [w] "r" (w), [biasptr] "r" (biasptr)
+                        : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "memory", "cc"
                         );
                     }
                     break;