Integrate SME2 kernels

* Add SME/SME2 detection.
* Integrate SME2 implementation for:
  - Normal convolution
  - Winograd
  - Depthwise convolution
  - Pooling

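For reference, a minimal sketch of how the new detection can be queried at run
time through the has_sme()/has_sme2() accessors wired up in this patch. The
helper name below is illustrative only, and CPUInfo::get() is assumed to be the
usual singleton accessor:

    #include "arm_compute/core/CPP/CPPTypes.h"

    // Sketch only: prefer an SME2 path when the running CPU reports SME2.
    // has_sme()/has_sme2() are the accessors this patch hooks up to the
    // HWCAP2 / ID_AA64PFR1_EL1 based detection.
    bool select_sme2_path()
    {
        const arm_compute::CPUInfo &ci = arm_compute::CPUInfo::get();
        return ci.has_sme() && ci.has_sme2();
    }
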
Resolves: COMPMID-5700
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I2f1ca1d05f8cfeee9309ed1c0a36096a4a6aad5c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8692
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/common/cpuinfo/CpuInfo.cpp b/src/common/cpuinfo/CpuInfo.cpp
index 0be21be..f76c0cc 100644
--- a/src/common/cpuinfo/CpuInfo.cpp
+++ b/src/common/cpuinfo/CpuInfo.cpp
@@ -336,17 +336,18 @@
 #elif(BARE_METAL) && defined(__aarch64__)        /* !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && (defined(__arm__) || defined(__aarch64__)) */
 
     // Assume single CPU in bare metal mode.  Just read the ID register and feature bits directly.
-    uint64_t isar0 = 0, isar1 = 0, pfr0 = 0, svefr0 = 0, midr = 0;
+    uint64_t isar0 = 0, isar1 = 0, pfr0 = 0, pfr1 = 0, svefr0 = 0, midr = 0;
     ARM_COMPUTE_GET_FEATURE_REG(isar0, ID_AA64ISAR0_EL1);
     ARM_COMPUTE_GET_FEATURE_REG(isar1, ID_AA64ISAR1_EL1);
     ARM_COMPUTE_GET_FEATURE_REG(pfr0, ID_AA64PFR0_EL1);
+    ARM_COMPUTE_GET_FEATURE_REG(pfr1, ID_AA64PFR1_EL1);
     ARM_COMPUTE_GET_FEATURE_REG(midr, MIDR_EL1);
     if((pfr0 >> 32) & 0xf)
     {
         svefr0 = get_sve_feature_reg();
     }
 
-    CpuIsaInfo            isa = init_cpu_isa_from_regs(isar0, isar1, pfr0, svefr0, midr);
+    CpuIsaInfo            isa = init_cpu_isa_from_regs(isar0, isar1, pfr0, pfr1, svefr0, midr);
     std::vector<CpuModel> cpus_model(1, midr_to_model(midr));
     CpuInfo               info(isa, cpus_model);
     return info;
diff --git a/src/common/cpuinfo/CpuInfo.h b/src/common/cpuinfo/CpuInfo.h
index 135ff96..953e488 100644
--- a/src/common/cpuinfo/CpuInfo.h
+++ b/src/common/cpuinfo/CpuInfo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -71,6 +71,14 @@
     {
         return _isa.sve2;
     }
+    bool has_sme() const
+    {
+        return _isa.sme;
+    }
+    bool has_sme2() const
+    {
+        return _isa.sme2;
+    }
     bool has_fp16() const
     {
         return _isa.fp16;
diff --git a/src/common/cpuinfo/CpuIsaInfo.cpp b/src/common/cpuinfo/CpuIsaInfo.cpp
index 6165533..23da54a 100644
--- a/src/common/cpuinfo/CpuIsaInfo.cpp
+++ b/src/common/cpuinfo/CpuIsaInfo.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -42,6 +42,7 @@
 #define ARM_COMPUTE_CPU_FEATURE_HWCAP2_SVEBF16 (1 << 12)
 #define ARM_COMPUTE_CPU_FEATURE_HWCAP2_I8MM (1 << 13)
 #define ARM_COMPUTE_CPU_FEATURE_HWCAP2_BF16 (1 << 14)
+#define ARM_COMPUTE_CPU_FEATURE_HWCAP2_SME (1 << 23)
 
 namespace arm_compute
 {
@@ -69,6 +70,10 @@
     isa.sve  = is_feature_supported(hwcaps, ARM_COMPUTE_CPU_FEATURE_HWCAP_SVE);
     isa.sve2 = is_feature_supported(hwcaps2, ARM_COMPUTE_CPU_FEATURE_HWCAP2_SVE2);
 
+    // Detection of SME from the HWCAP2 entry of the auxiliary vector
+    isa.sme   = is_feature_supported(hwcaps2, ARM_COMPUTE_CPU_FEATURE_HWCAP2_SME);
+    isa.sme2  = isa.sme; // No dedicated SME2 bit in HWCAP2 yet; fall back to the SME flag until proper detection is available
+
     // Data-type support
     isa.fp16    = is_feature_supported(hwcaps, ARM_COMPUTE_CPU_FEATURE_HWCAP_FPHP | ARM_COMPUTE_CPU_FEATURE_HWCAP_ASIMDHP);
     isa.bf16    = is_feature_supported(hwcaps2, ARM_COMPUTE_CPU_FEATURE_HWCAP2_BF16);
@@ -87,7 +92,7 @@
 }
 #endif /* defined(__aarch64__) */
 
-void decode_regs(CpuIsaInfo &isa, const uint64_t isar0, const uint64_t isar1, const uint64_t pfr0, const uint64_t svefr0)
+void decode_regs(CpuIsaInfo &isa, const uint64_t isar0, const uint64_t isar1, const uint64_t pfr0, const uint64_t pfr1, const uint64_t svefr0)
 {
     auto is_supported = [](uint64_t feature_reg, uint8_t feature_pos) -> bool
     {
@@ -97,6 +102,8 @@
     // High-level SIMD support
     isa.sve  = is_supported(pfr0, 32);
     isa.sve2 = is_supported(svefr0, 0);
+    isa.sme  = is_supported(pfr1, 24);
+    isa.sme2 = (((pfr1 >> 24) & 0xf) > 1);
 
     // Data-type support
     isa.fp16    = is_supported(pfr0, 16);
@@ -140,11 +147,11 @@
     return isa;
 }
 
-CpuIsaInfo init_cpu_isa_from_regs(uint64_t isar0, uint64_t isar1, uint64_t pfr0, uint64_t svefr0, uint64_t midr)
+CpuIsaInfo init_cpu_isa_from_regs(uint64_t isar0, uint64_t isar1, uint64_t pfr0, uint64_t pfr1, uint64_t svefr0, uint64_t midr)
 {
     CpuIsaInfo isa;
 
-    decode_regs(isa, isar0, isar1, pfr0, svefr0);
+    decode_regs(isa, isar0, isar1, pfr0, pfr1, svefr0);
 
     const CpuModel model = midr_to_model(midr);
     allowlisted_model_features(isa, model);
@@ -152,4 +159,4 @@
     return isa;
 }
 } // namespace cpuinfo
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/common/cpuinfo/CpuIsaInfo.h b/src/common/cpuinfo/CpuIsaInfo.h
index a2aace1..b92b653 100644
--- a/src/common/cpuinfo/CpuIsaInfo.h
+++ b/src/common/cpuinfo/CpuIsaInfo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -40,6 +40,8 @@
     bool neon{ false };
     bool sve{ false };
     bool sve2{ false };
+    bool sme{ false };
+    bool sme2{ false };
 
     /* Data-type extensions support */
     bool fp16{ false };
@@ -67,13 +69,14 @@
  *
  * @param[in] isar0  Value of Instruction Set Attribute Register 0 (ID_AA64ISAR0_EL1)
  * @param[in] isar1  Value of Instruction Set Attribute Register 1 (ID_AA64ISAR1_EL1)
- * @param[in] pfr0   Value of  Processor Feature Register 0 (ID_AA64PFR0_EL1)
+ * @param[in] pfr0   Value of Processor Feature Register 0 (ID_AA64PFR0_EL1)
+ * @param[in] pfr1   Value of Processor Feature Register 1 (ID_AA64PFR1_EL1)
  * @param[in] svefr0 Value of SVE feature ID register 0 (ID_AA64ZFR0_EL1)
  * @param[in] midr   Value of Main ID Register (MIDR)
  *
  * @return CpuIsaInfo A populated ISA feature structure
  */
-CpuIsaInfo init_cpu_isa_from_regs(uint64_t isar0, uint64_t isar1, uint64_t pfr0, uint64_t svefr0, uint64_t midr);
+CpuIsaInfo init_cpu_isa_from_regs(uint64_t isar0, uint64_t isar1, uint64_t pfr0, uint64_t pfr1, uint64_t svefr0, uint64_t midr);
 } // namespace cpuinfo
 } // namespace arm_compute
 
diff --git a/src/core/CPP/CPPTypes.cpp b/src/core/CPP/CPPTypes.cpp
index bd5236f..6a3f66f 100644
--- a/src/core/CPP/CPPTypes.cpp
+++ b/src/core/CPP/CPPTypes.cpp
@@ -103,12 +103,12 @@
 
 bool CPUInfo::has_sme() const
 {
-    return false;
+    return _impl->info.has_sme();
 }
 
 bool CPUInfo::has_sme2() const
 {
-    return false;
+    return _impl->info.has_sme2();
 }
 
 CPUModel CPUInfo::get_cpu_model() const
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_fp32.cpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_fp32.cpp
index 643cf1d..09ee983 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_fp32.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_fp32.cpp
@@ -32,8 +32,27 @@
 
 #include "depthwise_implementation_constraints.hpp"
 
+#include "interleaves/list.hpp"
+
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#include "kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za.hpp"
+#include "kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za.hpp"
+
+#include "kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za.hpp"
+#include "kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za.hpp"
+#include "kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za.hpp"
+#include "kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za.hpp"
+
+#include "kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp"
+#include "kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp"
+#include "kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp"
+#include "kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
+
 #include "kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp"
 #include "kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp"
 #include "kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp"
@@ -72,6 +91,18 @@
           );
   }
 
+  template <class Strategy>
+  unsigned int planar_cycle_estimate(const DepthwiseArgs &args, const Nothing &)
+  {
+    // First pass: estimate the number of output pixels that will be computed.
+    return arm_gemm::roundup(args.output_rows, Strategy::output_rows) *
+           args.output_cols *
+           arm_gemm::iceildiv(
+            (long unsigned) args.input_channels * args.channel_multiplier,
+            arm_gemm::utils::get_vector_length<typename Strategy::return_type>(Strategy::vl_type)
+          );
+  }
+
 #if defined(__aarch64__)
   unsigned int not_preferred(const DepthwiseArgs &, const Nothing &)
   {
@@ -89,6 +120,165 @@
 static const DepthwiseImplementation<float, float> depthwise_fp32_methods[] = {
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za",
+    constraint(fast_mode_enabled,
+               cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za",
+    constraint(fast_mode_enabled,
+               cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za",
+    constraint(fast_mode_enabled,
+               cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za",
+    constraint(fast_mode_enabled,
+               cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32_planar_3x3_s1_4rows_mla_za",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_planar_3x3_s1_4rows_mla_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    [] (const DepthwiseArgs &args, const Nothing &os) -> unsigned int {
+      // Heuristic: don't prefer this kernel unless the input plane is larger
+      // than the number of channels.
+      if (args.input_rows * args.input_cols < args.input_channels)
+        return UINT32_MAX;
+
+      return planar_cycle_estimate<sme2_fp32_planar_3x3_s1_4rows_mla_za>(args, os);
+    },
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_planar_3x3_s1_4rows_mla_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32_planar_3x3_s2_4rows_mla_za",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_planar_3x3_s2_4rows_mla_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    planar_cycle_estimate<sme2_fp32_planar_3x3_s2_4rows_mla_za>,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_planar_3x3_s2_4rows_mla_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32_planar_5x5_s1_4rows_mla_za",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_planar_5x5_s1_4rows_mla_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_planar_5x5_s1_4rows_mla_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_fp32_planar_5x5_s2_4rows_mla_za",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_planar_5x5_s2_4rows_mla_za>,
+               has_no_channel_multiplier, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_planar_5x5_s2_4rows_mla_za(args.cpu_info);
+      return new DepthwisePlanar<float>(strat, args);
+    },
+  },
+
+  {
+    DepthwiseMethod::DEPTHFIRST,
+    "sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst>,
+               has_no_channel_multiplier),
+    cycle_estimate<sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst>,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst(args.cpu_info);
+      return new DepthwiseDepthfirst<float, float, float, float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::DEPTHFIRST,
+    "sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst>,
+               has_no_channel_multiplier),
+    cycle_estimate<sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst>,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst(args.cpu_info);
+      return new DepthwiseDepthfirst<float, float, float, float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::DEPTHFIRST,
+    "sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst>,
+               has_no_channel_multiplier),
+    cycle_estimate<sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst>,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst(args.cpu_info);
+      return new DepthwiseDepthfirst<float, float, float, float>(strat, args);
+    },
+  },
+  {
+    DepthwiseMethod::DEPTHFIRST,
+    "sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst",
+    constraint(cpu_has_sme, cpu_has_sme2,
+               is_supported<sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst>,
+               has_no_channel_multiplier),
+    cycle_estimate<sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst>,
+    [] (const DepthwiseArgs &args, const Nothing &) -> DepthwiseCommon<float, float, float> * {
+      auto strat = new sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst(args.cpu_info);
+      return new DepthwiseDepthfirst<float, float, float, float>(strat, args);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
   {
     DepthwiseMethod::DEPTHFIRST,
     "sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst",
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp
index 78b6aec..1ba7694 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp
@@ -85,6 +85,18 @@
   return args.cpu_info->has_dotprod();
 }
 
+bool cpu_has_sme(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
+bool cpu_has_sme(const DepthwiseArgs &args, const void *)
+{
+  return args.cpu_info->has_sme();
+}
+
+bool cpu_has_sme2(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
+bool cpu_has_sme2(const DepthwiseArgs &args, const void *)
+{
+  return args.cpu_info->has_sme2();
+}
+
 bool cpu_has_sve(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
 bool cpu_has_sve(const DepthwiseArgs &args, const void *)
 {
@@ -115,6 +127,15 @@
   return args.channel_multiplier > 1;
 }
 
+// Planar kernels require a "priming" step before the main processing loop.  The kernels can prime with left padding
+// or input data, but not right padding - which could be needed in some extreme cases such as a 5x5 kernel with
+// width 1 and padding 2.  Such cases are rare and can be handled by other kernels anyway, so filter them out here.
+bool no_prime_right_pad(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
+bool no_prime_right_pad(const DepthwiseArgs &args, const void *)
+{
+  return (args.input_cols + args.padding.left) >= (args.kernel_cols - 1);
+}
+
 bool qp_has_no_left_shift(const DepthwiseArgs &args, const void *_qp) __attribute__ ((unused));
 bool qp_has_no_left_shift(const DepthwiseArgs &, const void *_qp)
 {
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp
index ff5098d..f3160fb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp
@@ -256,6 +256,9 @@
   {
   }
 
+  DepthwisePlanar(DepthwisePlanar &) = delete;
+  DepthwisePlanar &operator=(DepthwisePlanar &) = delete;
+
   size_t get_storage_size(void) const override
   {
     return m_strat->get_storage_size(this->m_args);
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_s8q.cpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_s8q.cpp
index 4ff249a..2d03183 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_s8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_s8q.cpp
@@ -28,11 +28,19 @@
 #include "depthwise_depthfirst.hpp"
 #include "depthwise_depthfirst_generic.hpp"
 #include "depthwise_depthfirst_multiplier.hpp"
+#include "depthwise_planar.hpp"
 
 #include "depthwise_implementation_constraints.hpp"
 
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#include "kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za.hpp"
+#include "kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
+
 #include "kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp"
 #include "kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp"
 #include "kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp"
@@ -73,6 +81,60 @@
 static const DepthwiseImplementation<int8_t, int8_t, int8_t, Requantize32> depthwise_s8q_methods[] = {
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_s8q_planar_3x3_s1_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_s8q_planar_3x3_s1_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<int8_t, int8_t, int8_t> * {
+      auto strat = new sme2_s8q_planar_3x3_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<int8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_s8q_planar_3x3_s2_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_s8q_planar_3x3_s2_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<int8_t, int8_t, int8_t> * {
+      auto strat = new sme2_s8q_planar_3x3_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<int8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_s8q_planar_5x5_s1_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_s8q_planar_5x5_s1_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<int8_t, int8_t, int8_t> * {
+      auto strat = new sme2_s8q_planar_5x5_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<int8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_s8q_planar_5x5_s2_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_s8q_planar_5x5_s2_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<int8_t, int8_t, int8_t> * {
+      auto strat = new sme2_s8q_planar_5x5_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<int8_t>(strat, args, qp);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
   {
     DepthwiseMethod::DEPTHFIRST,
     "sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst",
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8q.cpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8q.cpp
index b1489d0..9dbd89f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8q.cpp
@@ -28,11 +28,19 @@
 #include "depthwise_depthfirst.hpp"
 #include "depthwise_depthfirst_generic.hpp"
 #include "depthwise_depthfirst_multiplier.hpp"
+#include "depthwise_planar.hpp"
 
 #include "depthwise_implementation_constraints.hpp"
 
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#include "kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za.hpp"
+#include "kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
+
 #include "kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp"
 #include "kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp"
 #include "kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp"
@@ -66,6 +74,60 @@
 static const DepthwiseImplementation<uint8_t, uint8_t, uint8_t, Requantize32> depthwise_u8q_methods[] = {
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8q_planar_3x3_s1_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8q_planar_3x3_s1_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, uint8_t, uint8_t> * {
+      auto strat = new sme2_u8q_planar_3x3_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8q_planar_3x3_s2_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8q_planar_3x3_s2_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, uint8_t, uint8_t> * {
+      auto strat = new sme2_u8q_planar_3x3_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8q_planar_5x5_s1_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8q_planar_5x5_s1_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, uint8_t, uint8_t> * {
+      auto strat = new sme2_u8q_planar_5x5_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8q_planar_5x5_s2_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8q_planar_5x5_s2_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift, no_prime_right_pad),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, uint8_t, uint8_t> * {
+      auto strat = new sme2_u8q_planar_5x5_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t>(strat, args, qp);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
   {
     DepthwiseMethod::DEPTHFIRST,
     "sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst",
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8s8u8q.cpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8s8u8q.cpp
index 9b98901..0665c67 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8s8u8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_u8s8u8q.cpp
@@ -28,11 +28,18 @@
 #include "depthwise_depthfirst.hpp"
 #include "depthwise_depthfirst_generic.hpp"
 #include "depthwise_depthfirst_multiplier.hpp"
+#include "depthwise_planar.hpp"
 
 #include "depthwise_implementation_constraints.hpp"
 
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#include "kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za.hpp"
+#include "kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za.hpp"
+#include "kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
 #include "kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp"
 #include "kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp"
 #include "kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp"
@@ -54,6 +61,60 @@
 static const DepthwiseImplementation<uint8_t, int8_t, uint8_t, Requantize32> depthwise_u8q_methods[] = {
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, int8_t, uint8_t> * {
+      auto strat = new sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t, int8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, int8_t, uint8_t> * {
+      auto strat = new sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t, int8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, int8_t, uint8_t> * {
+      auto strat = new sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t, int8_t>(strat, args, qp);
+    },
+  },
+  {
+    DepthwiseMethod::PLANAR,
+    "sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za",
+    constraint<Requantize32>(cpu_has_sme, cpu_has_sme2,
+                             is_supported<sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za>,
+                             has_no_channel_multiplier,
+                             qp_has_no_left_shift),
+    nullptr,
+    [] (const DepthwiseArgs &args, const Requantize32 &qp) -> DepthwiseCommon<uint8_t, int8_t, uint8_t> * {
+      auto strat = new sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za(args.cpu_info);
+      return new DepthwisePlanar<uint8_t, int8_t>(strat, args, qp);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
   {
     DepthwiseMethod::DEPTHFIRST,
     "sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst",
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
new file mode 100644
index 0000000..a200ebf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(const float *const *const input_ptrs, float *const *const outptrs, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int n_tile_rows, const unsigned int n_tile_cols, const float *inptr, int64_t ld_input_row, int64_t ld_input_col, float *outptr, int64_t ld_output_row, int64_t ld_output_col, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+
+class sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float>
+{
+  private:
+  using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>;
+  Parent::IndirectKernelType m_indirect_kernel = sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl;
+  Parent::DirectKernelType m_direct_kernel = sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl;
+
+  public:
+  using return_type = float;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  constexpr static unsigned int kernel_rows = 3;
+  constexpr static unsigned int kernel_cols = 3;
+
+  constexpr static unsigned int stride_rows = 1;
+  constexpr static unsigned int stride_cols = 1;
+
+  constexpr static unsigned int output_rows = 2;
+  constexpr static unsigned int output_cols = 2;
+
+  sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *)
+  : Parent(output_rows, output_cols, kernel_rows, kernel_cols, stride_rows, stride_cols) {}
+
+  arm_gemm::VLType get_vl_type(void) const override { return vl_type; }
+
+  Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; }
+  Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
new file mode 100644
index 0000000..4c42478
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
+  const unsigned int n_tile_rows,
+  const unsigned int n_tile_cols,
+  const float *inptr,
+  int64_t ld_input_row,
+  int64_t ld_input_col,
+  float *outptr,
+  int64_t ld_output_row,
+  int64_t ld_output_col,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    const uint64_t n_tile_rows, n_tile_cols;
+    const float *inptr;
+    const uint64_t ld_input_row;
+    const uint64_t ld_input_col;
+    float *outptr;
+    const uint64_t ld_output_row;
+    const uint64_t ld_output_col;
+    const void *params;
+    const float min, max;
+
+    uint64_t tile_i = 0, tile_j = 0;
+
+    Args(
+      const unsigned int n_tile_rows,
+      const unsigned int n_tile_cols,
+      const float *inptr,
+      int64_t ld_input_row,
+      int64_t ld_input_col,
+      float *outptr,
+      int64_t ld_output_row,
+      int64_t ld_output_col,
+      const void *params,
+      const float activation_min,
+      const float activation_max
+    ) : n_tile_rows(n_tile_rows), n_tile_cols(n_tile_cols), inptr(inptr),
+        ld_input_row(ld_input_row), ld_input_col(ld_input_col), outptr(outptr),
+        ld_output_row(ld_output_row), ld_output_col(ld_output_col),
+        params(params), min(activation_min), max(activation_max)
+    {
+    }
+  };
+
+  Args params_struct(
+    n_tile_rows, n_tile_cols,
+    inptr, ld_input_row, ld_input_col,
+    outptr, ld_output_row, ld_output_col,
+    params, activation_min, activation_max
+  );
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ptrue p3.b\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "mov x5, #0x0\n"
+    "mov x6, #0x0\n"
+    "1:"  // Tile loop
+    "str x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "mov x21, #0x2\n"
+    "str x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+    "mul x19, x5, x20\n"  // offset = tile_i * ld_input_row
+    "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+    "madd x19, x6, x7, x19\n"  // offset += tile_j * ld_input_col
+    "mul x19, x19, x21\n"  // offset *= kernel_stride * output_size
+    "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
+    "add x8, x8, x19, LSL #2\n"  // inptr[0] += offset * sizeof(float)
+    "add x17, x8, x20, LSL #2\n"
+    "add x16, x17, x20, LSL #2\n"
+    "add x15, x7, x7\n"
+    "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+    "add x13, x16, x20, LSL #2\n"
+    "add x12, x15, x7\n"
+    "cbnz x6, 2f\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "sub x20, x19, x6\n"
+    "sub x20, x20, #0x1\n"
+    "lsl x11, %x[n_channels], #0x2\n"
+    "mov x19, #0x8\n"
+    "and x20, x20, #0x3fffff\n"
+    "mul x19, x19, x7\n"
+    "orr x11, x11, x20, LSL #22\n"
+    "orr x11, x11, x19, LSL #38\n"
+    "add x10, x17, x7, LSL #2\n"
+    "add x9, x8, x12, LSL #2\n"
+    "add x28, x17, x15, LSL #2\n"
+    "add x27, x16, x7, LSL #2\n"
+    "add x26, x13, x12, LSL #2\n"
+    "add x25, x8, x7, LSL #2\n"
+    "add x24, x8, x15, LSL #2\n"
+    "add x23, x16, x15, LSL #2\n"
+    "add x22, x17, x12, LSL #2\n"
+    "add x21, x16, x12, LSL #2\n"
+    "add x20, x13, x7, LSL #2\n"
+    "add x19, x13, x15, LSL #2\n"
+    ".inst 0xf8ab495a  // rprfm pldonce, x10, [x11]\n"
+    ".inst 0xf8ab491a  // rprfm pldonce, x8, [x11]\n"
+    ".inst 0xf8ab493a  // rprfm pldonce, x9, [x11]\n"
+    ".inst 0xf8ab4b9a  // rprfm pldonce, x28, [x11]\n"
+    ".inst 0xf8ab4b7a  // rprfm pldonce, x27, [x11]\n"
+    ".inst 0xf8ab49ba  // rprfm pldonce, x13, [x11]\n"
+    ".inst 0xf8ab4b5a  // rprfm pldonce, x26, [x11]\n"
+    ".inst 0xf8ab4b3a  // rprfm pldonce, x25, [x11]\n"
+    ".inst 0xf8ab4b1a  // rprfm pldonce, x24, [x11]\n"
+    ".inst 0xf8ab4afa  // rprfm pldonce, x23, [x11]\n"
+    ".inst 0xf8ab4a3a  // rprfm pldonce, x17, [x11]\n"
+    ".inst 0xf8ab4ada  // rprfm pldonce, x22, [x11]\n"
+    ".inst 0xf8ab4a1a  // rprfm pldonce, x16, [x11]\n"
+    ".inst 0xf8ab4aba  // rprfm pldonce, x21, [x11]\n"
+    ".inst 0xf8ab4a9a  // rprfm pldonce, x20, [x11]\n"
+    ".inst 0xf8ab4a7a  // rprfm pldonce, x19, [x11]\n"
+    "2:"  // Tile loop: Prefetch input rows: End
+    "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+    "mul x20, x5, x21\n"  // offset = tile_i * ld_output_row
+    "mov x19, #0x2\n"
+    "ld1w { z18.s }, p3/Z, [x14]\n"
+    "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+    "madd x20, x6, x24, x20\n"  // offset += tile_j * ld_output_col
+    "addvl x14, x14, #1\n"
+    ".inst 0xa040c1c0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+    "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+    "mul x20, x20, x19\n"  // offset *= output_tile_size
+    "cntw x22\n"
+    "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "addvl x14, x14, #4\n"
+    "add x23, x23, x20, LSL #2\n"  // outptrs[0] += offset * sizeof(float)
+    ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    "addvl x14, x14, #4\n"
+    "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "cmp x22, %x[n_channels]\n"
+    "add x21, x23, x21, LSL #2\n"
+    "ld1w { z8.s }, p3/Z, [x14]\n"
+    "mov x20, #0x0\n"
+    "sub x19, XZR, x22\n"
+    "ld1w { z9.s }, p2/Z, [x17, x7, LSL #2]\n"
+    "ld1w { z10.s }, p2/Z, [x8]\n"
+    "addvl x14, x14, #1\n"
+    "ld1w { z11.s }, p2/Z, [x8, x12, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x17, x15, LSL #2]\n"
+    "ld1w { z13.s }, p2/Z, [x16, x7, LSL #2]\n"
+    "bge 4f\n"
+    "3:"  // Tile loop: Channel loop
+    "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
+    "whilelt p1.s, x22, %x[n_channels]\n"
+    "incw x20\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x13]\n"
+    "incw x22\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x12, LSL #2]\n"
+    "mov p0.b, p2.b\n"
+    "fmla z30.s, p3/M, z2.s, z12.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "incw x19\n"
+    "fmla z28.s, p3/M, z5.s, z12.s\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x8, x7, LSL #2]\n"
+    "fmla z30.s, p3/M, z6.s, z9.s\n"
+    "fmla z31.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z9.s }, p2/Z, [x8, x15, LSL #2]\n"
+    "addvl x8, x8, #1\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z6.s, z13.s\n"
+    "ld1w { z18.s }, p3/Z, [x14]\n"
+    "addvl x14, x14, #1\n"
+    "fmla z30.s, p3/M, z4.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x17]\n"
+    "fmla z28.s, p3/M, z1.s, z12.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x17, x12, LSL #2]\n"
+    "addvl x17, x17, #1\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z29.s, p3/M, z1.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x16]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x12, LSL #2]\n"
+    "addvl x16, x16, #1\n"
+    "fmla z30.s, p3/M, z3.s, z9.s\n"
+    "fmla z31.s, p3/M, z5.s, z10.s\n"
+    "ld1w { z13.s }, p1/Z, [x16, x7, LSL #2]\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x7, LSL #2]\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "fmla z30.s, p3/M, z7.s, z11.s\n"
+    "fmla z31.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x13, x15, LSL #2]\n"
+    "whilelt p2.s, x20, %x[n_channels]\n"
+    "fmla z28.s, p3/M, z6.s, z9.s\n"
+    "fmla z29.s, p3/M, z8.s, z10.s\n"
+    ".inst 0xa040c1c0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+    "addvl x14, x14, #4\n"
+    "fmla z30.s, p3/M, z8.s, z12.s\n"
+    "fmla z31.s, p3/M, z7.s, z12.s\n"
+    ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+    "addvl x14, x14, #4\n"
+    "cmp x22, %x[n_channels]\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "addvl x13, x13, #1\n"
+    "ld1w { z9.s }, p1/Z, [x17, x7, LSL #2]\n"
+    "ld1w { z10.s }, p1/Z, [x8]\n"
+    "st1w { z28.s }, p0, [x23]\n"
+    "ld1w { z11.s }, p1/Z, [x8, x12, LSL #2]\n"
+    "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+    "addvl x23, x23, #1\n"
+    "ld1w { z12.s }, p1/Z, [x17, x15, LSL #2]\n"
+    "st1w { z30.s }, p0, [x21]\n"
+    "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
+    "addvl x21, x21, #1\n"
+    "ld1w { z8.s }, p3/Z, [x14]\n"
+    "addvl x14, x14, #1\n"
+    "blt 3b\n"
+    "4:"  // Tile loop: Channel tail
+    "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
+    "ldr x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "add x6, x6, #0x1\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x13]\n"
+    "ldr x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x12, LSL #2]\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "fmla z30.s, p3/M, z2.s, z12.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+    "fmla z28.s, p3/M, z5.s, z12.s\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x8, x7, LSL #2]\n"
+    "cmp x6, x19\n"
+    "fmla z30.s, p3/M, z6.s, z9.s\n"
+    "fmla z31.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z9.s }, p2/Z, [x8, x15, LSL #2]\n"
+    "add x19, x5, #0x1\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z6.s, z13.s\n"
+    "csel x5, x5, x19, LT\n"
+    "mov p0.b, p2.b\n"
+    "fmla z30.s, p3/M, z4.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x17]\n"
+    "csel x6, x6, XZR, LT\n"
+    "fmla z28.s, p3/M, z1.s, z12.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x17, x12, LSL #2]\n"
+    "cmp x5, x20\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z29.s, p3/M, z1.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x16]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x12, LSL #2]\n"
+    "fmla z30.s, p3/M, z3.s, z9.s\n"
+    "fmla z31.s, p3/M, z5.s, z10.s\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x7, LSL #2]\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "fmla z30.s, p3/M, z7.s, z11.s\n"
+    "fmla z31.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x13, x15, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z9.s\n"
+    "fmla z29.s, p3/M, z8.s, z10.s\n"
+    "fmla z30.s, p3/M, z8.s, z12.s\n"
+    "fmla z31.s, p3/M, z7.s, z12.s\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "st1w { z28.s }, p0, [x23]\n"
+    "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+    "st1w { z30.s }, p0, [x21]\n"
+    "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
+    "blt 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
new file mode 100644
index 0000000..5fc6602
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
+  const float *const *const input_ptrs,
+  float *const *const outptrs,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    float *const *outptrs;
+    const void *params;
+    const float min, max;
+    const float *inptrs[16];
+
+    Args(
+      const float *const *const input_ptrs,
+      float *const *const outptrs,
+      const void *const params,
+      const float min,
+      const float max
+    ) : outptrs(outptrs), params(params), min(min), max(max)
+    {
+      inptrs[0] = input_ptrs[5];
+      inptrs[1] = input_ptrs[0];
+      inptrs[2] = input_ptrs[3];
+      inptrs[3] = input_ptrs[6];
+      inptrs[4] = input_ptrs[9];
+      inptrs[5] = input_ptrs[12];
+      inptrs[6] = input_ptrs[15];
+      inptrs[7] = input_ptrs[1];
+      inptrs[8] = input_ptrs[2];
+      inptrs[9] = input_ptrs[10];
+      inptrs[10] = input_ptrs[4];
+      inptrs[11] = input_ptrs[7];
+      inptrs[12] = input_ptrs[8];
+      inptrs[13] = input_ptrs[11];
+      inptrs[14] = input_ptrs[13];
+      inptrs[15] = input_ptrs[14];
+
+    }
+  };
+
+  Args params_struct(input_ptrs, outptrs, params,
+                     activation_min, activation_max);
+
+  __asm__ __volatile__(
+    "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+    "ptrue p3.b\n"
+    "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "ld1w { z18.s }, p3/Z, [x13]\n"
+    "addvl x13, x13, #1\n"
+    "ldp x12, x11, [x19, #0x0]\n"
+    "cntw x10\n"
+    ".inst 0xa040c1a0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x13]\n"
+    "addvl x13, x13, #4\n"
+    "ldp x9, x28, [x19, #0x10]\n"
+    "mov x27, #0x0\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    ".inst 0xa040c1a4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x13]\n"
+    "ldp x26, x25, [x14, #0x0]\n"
+    "addvl x13, x13, #4\n"
+    "cmp x10, %x[n_channels]\n"
+    "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "ldp x24, x21, [x14, #0x10]\n"
+    "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "sub x23, XZR, x10\n"
+    "ldr x22, [x14, #0x20]\n"
+    "ld1w { z8.s }, p3/Z, [x13]\n"
+    "addvl x13, x13, #1\n"
+    "ld1w { z9.s }, p2/Z, [x26, x27, LSL #2]\n"
+    "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
+    "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z13.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "bge 2f\n"
+    "1:"  // Channel loop
+    "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
+    "ldr x21, [x14, #0x28]\n"
+    "whilelt p1.s, x10, %x[n_channels]\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ldr x20, [x14, #0x30]\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z11.s\n"
+    "ldr x19, [x14, #0x38]\n"
+    "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fmla z30.s, p3/M, z2.s, z12.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ldr x25, [x14, #0x48]\n"
+    "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
+    "fmla z28.s, p3/M, z5.s, z12.s\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+    "ldr x26, [x14, #0x40]\n"
+    "fmla z30.s, p3/M, z6.s, z9.s\n"
+    "fmla z31.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z9.s }, p2/Z, [x26, x27, LSL #2]\n"
+    "ldr x24, [x14, #0x50]\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z6.s, z13.s\n"
+    "ldr x21, [x14, #0x58]\n"
+    "ld1w { z18.s }, p3/Z, [x13]\n"
+    "fmla z30.s, p3/M, z4.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
+    "ldr x22, [x14, #0x60]\n"
+    "fmla z28.s, p3/M, z1.s, z12.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ldr x21, [x14, #0x68]\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    "ldr x20, [x14, #0x70]\n"
+    "addvl x13, x13, #1\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z29.s, p3/M, z1.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "ldr x19, [x14, #0x78]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "ldp x26, x25, [x14, #0x0]\n"
+    "incw x23\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ldp x24, x21, [x14, #0x10]\n"
+    "fmla z30.s, p3/M, z3.s, z9.s\n"
+    "fmla z31.s, p3/M, z5.s, z10.s\n"
+    "ldr x22, [x14, #0x20]\n"
+    "ld1w { z13.s }, p1/Z, [x22, x10, LSL #2]\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "mov p0.b, p2.b\n"
+    "fmla z30.s, p3/M, z7.s, z11.s\n"
+    "fmla z31.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+    "incw x27\n"
+    "fmla z28.s, p3/M, z6.s, z9.s\n"
+    "fmla z29.s, p3/M, z8.s, z10.s\n"
+    "ld1w { z9.s }, p1/Z, [x26, x10, LSL #2]\n"
+    "whilelt p2.s, x27, %x[n_channels]\n"
+    "fmla z30.s, p3/M, z8.s, z12.s\n"
+    "fmla z31.s, p3/M, z7.s, z12.s\n"
+    "ld1w { z10.s }, p1/Z, [x25, x10, LSL #2]\n"
+    "ld1w { z11.s }, p1/Z, [x24, x10, LSL #2]\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "st1w { z28.s }, p0, [x12, x23, LSL #2]\n"
+    "ld1w { z12.s }, p1/Z, [x21, x10, LSL #2]\n"
+    "incw x10\n"
+    "cmp x10, %x[n_channels]\n"
+    "st1w { z29.s }, p0, [x11, x23, LSL #2]\n"
+    ".inst 0xa040c1a0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x13]\n"
+    "addvl x13, x13, #4\n"
+    "st1w { z30.s }, p0, [x9, x23, LSL #2]\n"
+    ".inst 0xa040c1a4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x13]\n"
+    "addvl x13, x13, #4\n"
+    "st1w { z31.s }, p0, [x28, x23, LSL #2]\n"
+    "ld1w { z8.s }, p3/Z, [x13]\n"
+    "addvl x13, x13, #1\n"
+    "blt 1b\n"
+    "2:"  // Channel tail
+    "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
+    "ldr x21, [x14, #0x28]\n"
+    "incw x23\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ldr x20, [x14, #0x30]\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z11.s\n"
+    "ldr x19, [x14, #0x38]\n"
+    "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fmla z30.s, p3/M, z2.s, z12.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ldr x25, [x14, #0x48]\n"
+    "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
+    "fmla z28.s, p3/M, z5.s, z12.s\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+    "ldr x26, [x14, #0x40]\n"
+    "fmla z30.s, p3/M, z6.s, z9.s\n"
+    "fmla z31.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z9.s }, p2/Z, [x26, x27, LSL #2]\n"
+    "ldr x24, [x14, #0x50]\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z6.s, z13.s\n"
+    "ldr x21, [x14, #0x58]\n"
+    "mov p0.b, p2.b\n"
+    "fmla z30.s, p3/M, z4.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
+    "ldr x22, [x14, #0x60]\n"
+    "fmla z28.s, p3/M, z1.s, z12.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ldr x21, [x14, #0x68]\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    "ldr x20, [x14, #0x70]\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z29.s, p3/M, z1.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "ldr x19, [x14, #0x78]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "fmla z30.s, p3/M, z3.s, z9.s\n"
+    "fmla z31.s, p3/M, z5.s, z10.s\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "fmla z30.s, p3/M, z7.s, z11.s\n"
+    "fmla z31.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z9.s\n"
+    "fmla z29.s, p3/M, z8.s, z10.s\n"
+    "fmla z30.s, p3/M, z8.s, z12.s\n"
+    "fmla z31.s, p3/M, z7.s, z12.s\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "st1w { z28.s }, p0, [x12, x23, LSL #2]\n"
+    "st1w { z29.s }, p0, [x11, x23, LSL #2]\n"
+    "st1w { z30.s }, p0, [x9, x23, LSL #2]\n"
+    "st1w { z31.s }, p0, [x28, x23, LSL #2]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp
new file mode 100644
index 0000000..cf74f43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(const float *const *const input_ptrs, float *const *const outptrs, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(const unsigned int n_tile_rows, const unsigned int n_tile_cols, const float *inptr, int64_t ld_input_row, int64_t ld_input_col, float *outptr, int64_t ld_output_row, int64_t ld_output_col, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+
+class sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float>
+{
+  private:
+  using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>;
+  Parent::IndirectKernelType m_indirect_kernel = sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl;
+  Parent::DirectKernelType m_direct_kernel = sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl;
+
+  public:
+  using return_type = float;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  constexpr static unsigned int kernel_rows = 3;
+  constexpr static unsigned int kernel_cols = 3;
+
+  constexpr static unsigned int stride_rows = 1;
+  constexpr static unsigned int stride_cols = 1;
+
+  constexpr static unsigned int output_rows = 3;
+  constexpr static unsigned int output_cols = 3;
+
+  sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *)
+  : Parent(output_rows, output_cols, kernel_rows, kernel_cols, stride_rows, stride_cols) {}
+
+  arm_gemm::VLType get_vl_type(void) const override { return vl_type; }
+
+  Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; }
+  Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
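
Note: the strategy class above only records the kernel geometry (3x3 kernel, stride 1, 3x3 output tile) and hands back the two kernel entry points; everything else is derived by the depthwise framework. As a rough, illustrative check (constant names simply mirror the class members; none of this is part of the generated sources), the input patch each output tile consumes follows directly from those constants and matches the 25-entry inptrs table used by the indirect kernel further down:

    // Illustrative only: relate the strategy constants above to the input
    // patch one output tile reads (values mirror the class members).
    constexpr unsigned int kernel_rows = 3, kernel_cols = 3;
    constexpr unsigned int stride_rows = 1, stride_cols = 1;
    constexpr unsigned int output_rows = 3, output_cols = 3;

    constexpr unsigned int input_rows = (output_rows - 1) * stride_rows + kernel_rows; // 5
    constexpr unsigned int input_cols = (output_cols - 1) * stride_cols + kernel_cols; // 5

    static_assert(input_rows * input_cols == 25,
                  "one 3x3 output tile reads a 5x5 input patch, i.e. the 25 pointers "
                  "handed to the indirect kernel");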
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
new file mode 100644
index 0000000..8ff0fe4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
+  const unsigned int n_tile_rows,
+  const unsigned int n_tile_cols,
+  const float *inptr,
+  int64_t ld_input_row,
+  int64_t ld_input_col,
+  float *outptr,
+  int64_t ld_output_row,
+  int64_t ld_output_col,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    const uint64_t n_tile_rows, n_tile_cols;
+    const float *inptr;
+    const uint64_t ld_input_row;
+    const uint64_t ld_input_col;
+    float *outptr;
+    const uint64_t ld_output_row;
+    const uint64_t ld_output_col;
+    const void *params;
+    const float min, max;
+
+    uint64_t tile_i = 0, tile_j = 0;
+
+    Args(
+      const unsigned int n_tile_rows,
+      const unsigned int n_tile_cols,
+      const float *inptr,
+      int64_t ld_input_row,
+      int64_t ld_input_col,
+      float *outptr,
+      int64_t ld_output_row,
+      int64_t ld_output_col,
+      const void *params,
+      const float activation_min,
+      const float activation_max
+    ) : n_tile_rows(n_tile_rows), n_tile_cols(n_tile_cols), inptr(inptr),
+        ld_input_row(ld_input_row), ld_input_col(ld_input_col), outptr(outptr),
+        ld_output_row(ld_output_row), ld_output_col(ld_output_col),
+        params(params), min(activation_min), max(activation_max)
+    {
+    }
+  };
+
+  Args params_struct(
+    n_tile_rows, n_tile_cols,
+    inptr, ld_input_row, ld_input_col,
+    outptr, ld_output_row, ld_output_col,
+    params, activation_min, activation_max
+  );
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ptrue p3.b\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "mov x2, #0x0\n"
+    "mov x3, #0x0\n"
+    "1:"  // Tile loop
+    "str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "mov x21, #0x3\n"
+    "str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+    "mul x19, x2, x20\n"  // offset = tile_i * ld_input_row
+    "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+    "madd x19, x3, x4, x19\n"  // offset += tile_j * ld_input_col
+    "mul x19, x19, x21\n"  // offset *= kernel_stride * output_size
+    "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+    "add x5, x5, x19, LSL #2\n"  // inptr[0] += offset * sizeof(float)
+    "add x6, x5, x20, LSL #2\n"
+    "add x7, x6, x20, LSL #2\n"
+    "add x8, x4, x4\n"
+    "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+    "add x16, x7, x20, LSL #2\n"
+    "add x15, x8, x4\n"
+    "add x14, x16, x20, LSL #2\n"
+    "add x13, x15, x4\n"
+    "cbnz x3, 2f\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "sub x20, x19, x3\n"
+    "sub x20, x20, #0x1\n"
+    "lsl x12, %x[n_channels], #0x2\n"
+    "mov x19, #0xc\n"
+    "and x20, x20, #0x3fffff\n"
+    "mul x19, x19, x4\n"
+    "orr x12, x12, x20, LSL #22\n"
+    "orr x12, x12, x19, LSL #38\n"
+    "add x25, x7, x8, LSL #2\n"
+    "add x24, x5, x13, LSL #2\n"
+    "add x23, x6, x8, LSL #2\n"
+    "add x22, x14, x13, LSL #2\n"
+    "add x21, x7, x4, LSL #2\n"
+    "add x20, x5, x4, LSL #2\n"
+    "add x19, x5, x15, LSL #2\n"
+    "add x11, x7, x15, LSL #2\n"
+    "add x10, x6, x13, LSL #2\n"
+    "add x9, x16, x8, LSL #2\n"
+    "add x28, x16, x13, LSL #2\n"
+    "add x27, x14, x4, LSL #2\n"
+    "add x26, x6, x4, LSL #2\n"
+    ".inst 0xf8ac4b3a  // rprfm pldonce, x25, [x12]\n"
+    "add x25, x6, x15, LSL #2\n"
+    ".inst 0xf8ac48ba  // rprfm pldonce, x5, [x12]\n"
+    ".inst 0xf8ac4b1a  // rprfm pldonce, x24, [x12]\n"
+    "add x24, x14, x15, LSL #2\n"
+    ".inst 0xf8ac49da  // rprfm pldonce, x14, [x12]\n"
+    ".inst 0xf8ac4afa  // rprfm pldonce, x23, [x12]\n"
+    "add x23, x16, x4, LSL #2\n"
+    ".inst 0xf8ac4ada  // rprfm pldonce, x22, [x12]\n"
+    "add x22, x5, x8, LSL #2\n"
+    ".inst 0xf8ac4aba  // rprfm pldonce, x21, [x12]\n"
+    "add x21, x16, x15, LSL #2\n"
+    ".inst 0xf8ac4a9a  // rprfm pldonce, x20, [x12]\n"
+    "add x20, x7, x13, LSL #2\n"
+    ".inst 0xf8ac4a7a  // rprfm pldonce, x19, [x12]\n"
+    "add x19, x14, x8, LSL #2\n"
+    ".inst 0xf8ac497a  // rprfm pldonce, x11, [x12]\n"
+    ".inst 0xf8ac48da  // rprfm pldonce, x6, [x12]\n"
+    ".inst 0xf8ac495a  // rprfm pldonce, x10, [x12]\n"
+    ".inst 0xf8ac4a1a  // rprfm pldonce, x16, [x12]\n"
+    ".inst 0xf8ac493a  // rprfm pldonce, x9, [x12]\n"
+    ".inst 0xf8ac4b9a  // rprfm pldonce, x28, [x12]\n"
+    ".inst 0xf8ac4b7a  // rprfm pldonce, x27, [x12]\n"
+    ".inst 0xf8ac4b5a  // rprfm pldonce, x26, [x12]\n"
+    ".inst 0xf8ac4b3a  // rprfm pldonce, x25, [x12]\n"
+    ".inst 0xf8ac4b1a  // rprfm pldonce, x24, [x12]\n"
+    ".inst 0xf8ac4afa  // rprfm pldonce, x23, [x12]\n"
+    ".inst 0xf8ac4ada  // rprfm pldonce, x22, [x12]\n"
+    ".inst 0xf8ac4aba  // rprfm pldonce, x21, [x12]\n"
+    ".inst 0xf8ac48fa  // rprfm pldonce, x7, [x12]\n"
+    ".inst 0xf8ac4a9a  // rprfm pldonce, x20, [x12]\n"
+    ".inst 0xf8ac4a7a  // rprfm pldonce, x19, [x12]\n"
+    "2:"  // Tile loop: Prefetch input rows: End
+    "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+    "mul x20, x2, x21\n"  // offset = tile_i * ld_output_row
+    "mov x19, #0x3\n"
+    "ld1w { z18.s }, p3/Z, [x17]\n"
+    "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+    "madd x20, x3, x26, x20\n"  // offset += tile_j * ld_output_col
+    "mul x20, x20, x19\n"  // offset *= output_tile_size
+    "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "ldr x25, [%x[params_struct], %[offsetof_args_outptr]]\n"
+    "addvl x17, x17, #1\n"
+    "add x25, x25, x20, LSL #2\n"  // outptrs[0] += offset * sizeof(float)
+    ".inst 0xa040c220  // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+    "cntw x24\n"
+    "addvl x17, x17, #4\n"
+    ".inst 0xa040c224  // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+    "add x23, x25, x21, LSL #2\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "addvl x17, x17, #4\n"
+    "cmp x24, %x[n_channels]\n"
+    "ld1w { z8.s }, p3/Z, [x17]\n"
+    "add x22, x23, x21, LSL #2\n"
+    "add x21, x26, x26\n"
+    "ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
+    "mov x20, #0x0\n"
+    "sub x19, XZR, x24\n"
+    "ld1w { z10.s }, p2/Z, [x5]\n"
+    "ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
+    "addvl x17, x17, #1\n"
+    "ld1w { z12.s }, p2/Z, [x14]\n"
+    "ld1w { z13.s }, p2/Z, [x6, x8, LSL #2]\n"
+    "bge 4f\n"
+    "3:"  // Tile loop: Channel loop
+    "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
+    "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
+    "whilelt p1.s, x24, %x[n_channels]\n"
+    "incw x20\n"
+    "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
+    "fmla z24.s, p3/M, z4.s, z13.s\n"
+    "incw x24\n"
+    "mov p0.b, p2.b\n"
+    "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
+    "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
+    "incw x19\n"
+    "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
+    "fmla z23.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x7, x15, LSL #2]\n"
+    "fmla z25.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x7, x4, LSL #2]\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "fmla z23.s, p3/M, z5.s, z13.s\n"
+    "fmla z25.s, p3/M, z3.s, z13.s\n"
+    "fmla z26.s, p3/M, z2.s, z13.s\n"
+    "fmla z27.s, p3/M, z1.s, z13.s\n"
+    "fmla z28.s, p3/M, z0.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x5, x4, LSL #2]\n"
+    "fmla z29.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x13, LSL #2]\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "fmla z24.s, p3/M, z0.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z12.s\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x5, x15, LSL #2]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z26.s, p3/M, z4.s, z11.s\n"
+    "ld1w { z18.s }, p3/Z, [x17]\n"
+    "addvl x17, x17, #1\n"
+    "fmla z27.s, p3/M, z3.s, z11.s\n"
+    "fmla z29.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x6]\n"
+    "fmla z24.s, p3/M, z2.s, z12.s\n"
+    "fmla z25.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x16]\n"
+    "fmla z28.s, p3/M, z4.s, z10.s\n"
+    "fmla z23.s, p3/M, z1.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x6, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z2.s, z10.s\n"
+    "fmla z31.s, p3/M, z1.s, z10.s\n"
+    "fmla z24.s, p3/M, z8.s, z10.s\n"
+    "fmla z25.s, p3/M, z7.s, z10.s\n"
+    "fmla z27.s, p3/M, z5.s, z10.s\n"
+    "fmla z26.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x8, LSL #2]\n"
+    "fmla z28.s, p3/M, z2.s, z13.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "fmla z30.s, p3/M, z4.s, z10.s\n"
+    "fmla z31.s, p3/M, z3.s, z10.s\n"
+    "fmla z23.s, p3/M, z3.s, z11.s\n"
+    "fmla z25.s, p3/M, z5.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x13, LSL #2]\n"
+    "fmla z26.s, p3/M, z6.s, z12.s\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x4, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z10.s\n"
+    "fmla z29.s, p3/M, z5.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x4, LSL #2]\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "fmla z30.s, p3/M, z6.s, z13.s\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "fmla z28.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x6, x15, LSL #2]\n"
+    "addvl x6, x6, #1\n"
+    "fmla z29.s, p3/M, z7.s, z13.s\n"
+    "fmla z24.s, p3/M, z3.s, z12.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x15, LSL #2]\n"
+    "fmla z27.s, p3/M, z0.s, z12.s\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z30.s, p3/M, z8.s, z13.s\n"
+    "fmla z31.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "fmla z24.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x16, x4, LSL #2]\n"
+    "addvl x16, x16, #1\n"
+    "fmla z25.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x5, x8, LSL #2]\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "addvl x5, x5, #1\n"
+    "fmla z30.s, p3/M, z3.s, z12.s\n"
+    "fmla z23.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z10.s }, p1/Z, [x5]\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "fmla z26.s, p3/M, z7.s, z12.s\n"
+    "fmla z27.s, p3/M, z6.s, z12.s\n"
+    "fmla z24.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x7]\n"
+    "fmla z25.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x7, x13, LSL #2]\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "addvl x7, x7, #1\n"
+    "fmla z30.s, p3/M, z5.s, z13.s\n"
+    "fmla z23.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z27.s, p3/M, z8.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x8, LSL #2]\n"
+    "fmla z26.s, p3/M, z3.s, z12.s\n"
+    "whilelt p2.s, x20, %x[n_channels]\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    ".inst 0xa040c220  // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+    "addvl x17, x17, #4\n"
+    "fmla z29.s, p3/M, z8.s, z13.s\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "addvl x14, x14, #1\n"
+    "cmp x24, %x[n_channels]\n"
+    "fmla z31.s, p3/M, z6.s, z13.s\n"
+    "fmax z23.s, p3/M, z23.s, z17.s\n"
+    ".inst 0xa040c224  // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+    "addvl x17, x17, #4\n"
+    "fmin z23.s, p3/M, z23.s, z16.s\n"
+    ".inst 0xc1b0ca38  // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
+    "ld1w { z11.s }, p1/Z, [x5, x13, LSL #2]\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "ld1w { z12.s }, p1/Z, [x14]\n"
+    "st1w { z23.s }, p0, [x25]\n"
+    "ld1w { z13.s }, p1/Z, [x6, x8, LSL #2]\n"
+    "st1w { z24.s }, p0, [x25, x26, LSL #2]\n"
+    "st1w { z25.s }, p0, [x25, x21, LSL #2]\n"
+    "addvl x25, x25, #1\n"
+    "ld1w { z8.s }, p3/Z, [x17]\n"
+    "addvl x17, x17, #1\n"
+    "st1w { z26.s }, p0, [x23]\n"
+    "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
+    "st1w { z28.s }, p0, [x23, x21, LSL #2]\n"
+    "addvl x23, x23, #1\n"
+    "st1w { z29.s }, p0, [x22]\n"
+    "st1w { z30.s }, p0, [x22, x26, LSL #2]\n"
+    "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
+    "addvl x22, x22, #1\n"
+    "blt 3b\n"
+    "4:"  // Tile loop: Channel tail
+    "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
+    "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
+    "ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "add x3, x3, #0x1\n"
+    "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
+    "fmla z24.s, p3/M, z4.s, z13.s\n"
+    "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "add x20, x2, #0x1\n"
+    "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
+    "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "cmp x3, x19\n"
+    "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
+    "fmla z23.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x7, x15, LSL #2]\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+    "fmla z25.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x7, x4, LSL #2]\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
+    "csel x2, x2, x20, LT\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "mov p0.b, p2.b\n"
+    "csel x3, x3, XZR, LT\n"
+    "fmla z23.s, p3/M, z5.s, z13.s\n"
+    "fmla z25.s, p3/M, z3.s, z13.s\n"
+    "cmp x2, x19\n"
+    "fmla z26.s, p3/M, z2.s, z13.s\n"
+    "fmla z27.s, p3/M, z1.s, z13.s\n"
+    "fmla z28.s, p3/M, z0.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x5, x4, LSL #2]\n"
+    "fmla z29.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x13, LSL #2]\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "fmla z24.s, p3/M, z0.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z12.s\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x5, x15, LSL #2]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z26.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z3.s, z11.s\n"
+    "fmla z29.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x6]\n"
+    "fmla z24.s, p3/M, z2.s, z12.s\n"
+    "fmla z25.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x16]\n"
+    "fmla z28.s, p3/M, z4.s, z10.s\n"
+    "fmla z23.s, p3/M, z1.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x6, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z2.s, z10.s\n"
+    "fmla z31.s, p3/M, z1.s, z10.s\n"
+    "fmla z24.s, p3/M, z8.s, z10.s\n"
+    "fmla z25.s, p3/M, z7.s, z10.s\n"
+    "fmla z27.s, p3/M, z5.s, z10.s\n"
+    "fmla z26.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x8, LSL #2]\n"
+    "fmla z28.s, p3/M, z2.s, z13.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "fmla z30.s, p3/M, z4.s, z10.s\n"
+    "fmla z31.s, p3/M, z3.s, z10.s\n"
+    "fmla z23.s, p3/M, z3.s, z11.s\n"
+    "fmla z25.s, p3/M, z5.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x13, LSL #2]\n"
+    "fmla z26.s, p3/M, z6.s, z12.s\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x4, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z10.s\n"
+    "fmla z29.s, p3/M, z5.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x4, LSL #2]\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "fmla z30.s, p3/M, z6.s, z13.s\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "fmla z28.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x6, x15, LSL #2]\n"
+    "fmla z29.s, p3/M, z7.s, z13.s\n"
+    "fmla z24.s, p3/M, z3.s, z12.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x15, LSL #2]\n"
+    "fmla z27.s, p3/M, z0.s, z12.s\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z30.s, p3/M, z8.s, z13.s\n"
+    "fmla z31.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "fmla z24.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x16, x4, LSL #2]\n"
+    "fmla z25.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x5, x8, LSL #2]\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "fmla z30.s, p3/M, z3.s, z12.s\n"
+    "fmla z23.s, p3/M, z2.s, z11.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "fmla z26.s, p3/M, z7.s, z12.s\n"
+    "fmla z27.s, p3/M, z6.s, z12.s\n"
+    "fmla z24.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x7]\n"
+    "fmla z25.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x7, x13, LSL #2]\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z30.s, p3/M, z5.s, z13.s\n"
+    "fmla z23.s, p3/M, z6.s, z12.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z27.s, p3/M, z8.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x8, LSL #2]\n"
+    "fmla z26.s, p3/M, z3.s, z12.s\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z13.s\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "fmla z31.s, p3/M, z6.s, z13.s\n"
+    "fmax z23.s, p3/M, z23.s, z17.s\n"
+    "fmin z23.s, p3/M, z23.s, z16.s\n"
+    ".inst 0xc1b0ca38  // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
+    "st1w { z23.s }, p0, [x25]\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "st1w { z24.s }, p0, [x25, x26, LSL #2]\n"
+    "st1w { z25.s }, p0, [x25, x21, LSL #2]\n"
+    "st1w { z26.s }, p0, [x23]\n"
+    "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
+    "st1w { z28.s }, p0, [x23, x21, LSL #2]\n"
+    "st1w { z29.s }, p0, [x22]\n"
+    "st1w { z30.s }, p0, [x22, x26, LSL #2]\n"
+    "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
+    "blt 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
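
Note: the inline comments in the tile loop above spell out the address arithmetic the assembly performs before each 3x3 output tile: the input pointer advances by (tile_i * ld_input_row + tile_j * ld_input_col) * 3 elements, and the output pointer by the analogous expression with the output strides. A scalar restatement of that addressing, purely for reference (the helper names are made up; the kernel does this in registers, with the sizeof(float) scaling folded into the "LSL #2"):

    #include <cstdint>

    // Reference-only restatement of the tile addressing annotated in the
    // assembly above; the factor of 3 is the 3-row/3-column output tile
    // at unit stride.
    static inline const float *tile_input_ptr(const float *inptr,
                                              uint64_t tile_i, uint64_t tile_j,
                                              uint64_t ld_input_row, uint64_t ld_input_col)
    {
        uint64_t offset = tile_i * ld_input_row;  // offset  = tile_i * ld_input_row
        offset += tile_j * ld_input_col;          // offset += tile_j * ld_input_col
        offset *= 3;                              // offset *= output tile size
        return inptr + offset;                    // asm: "add x5, x5, x19, LSL #2"
    }

    static inline float *tile_output_ptr(float *outptr,
                                         uint64_t tile_i, uint64_t tile_j,
                                         uint64_t ld_output_row, uint64_t ld_output_col)
    {
        uint64_t offset = tile_i * ld_output_row + tile_j * ld_output_col;
        return outptr + offset * 3;               // same pattern with the output strides
    }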
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
new file mode 100644
index 0000000..ab910c1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
+  const float *const *const input_ptrs,
+  float *const *const outptrs,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    float *const *outptrs;
+    const void *params;
+    const float min, max;
+    const float *inptrs[25];
+
+    Args(
+      const float *const *const input_ptrs,
+      float *const *const outptrs,
+      const void *const params,
+      const float min,
+      const float max
+    ) : outptrs(outptrs), params(params), min(min), max(max)
+    {
+      inptrs[0] = input_ptrs[12];
+      inptrs[1] = input_ptrs[0];
+      inptrs[2] = input_ptrs[4];
+      inptrs[3] = input_ptrs[20];
+      inptrs[4] = input_ptrs[7];
+      inptrs[5] = input_ptrs[24];
+      inptrs[6] = input_ptrs[11];
+      inptrs[7] = input_ptrs[1];
+      inptrs[8] = input_ptrs[3];
+      inptrs[9] = input_ptrs[13];
+      inptrs[10] = input_ptrs[5];
+      inptrs[11] = input_ptrs[9];
+      inptrs[12] = input_ptrs[15];
+      inptrs[13] = input_ptrs[17];
+      inptrs[14] = input_ptrs[19];
+      inptrs[15] = input_ptrs[21];
+      inptrs[16] = input_ptrs[6];
+      inptrs[17] = input_ptrs[8];
+      inptrs[18] = input_ptrs[23];
+      inptrs[19] = input_ptrs[16];
+      inptrs[20] = input_ptrs[2];
+      inptrs[21] = input_ptrs[18];
+      inptrs[22] = input_ptrs[10];
+      inptrs[23] = input_ptrs[14];
+      inptrs[24] = input_ptrs[22];
+
+    }
+  };
+
+  Args params_struct(input_ptrs, outptrs, params,
+                     activation_min, activation_max);
+
+  __asm__ __volatile__(
+    "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
+    "ptrue p3.b\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "ld1w { z18.s }, p3/Z, [x16]\n"
+    "addvl x16, x16, #1\n"
+    "ldp x14, x13, [x15, #0x0]\n"
+    "ldp x12, x11, [x15, #0x10]\n"
+    "cntw x10\n"
+    ".inst 0xa040c200  // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+    "addvl x16, x16, #4\n"
+    "ldr x9, [x15, #0x20]\n"
+    "mov x28, #0x0\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    ".inst 0xa040c204  // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+    "addvl x16, x16, #4\n"
+    "cmp x10, %x[n_channels]\n"
+    "ldr x27, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+    "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "sub x26, XZR, x10\n"
+    "ld1w { z8.s }, p3/Z, [x16]\n"
+    "addvl x16, x16, #1\n"
+    "ld1w { z9.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "ld1w { z10.s }, p2/Z, [x13, x28, LSL #2]\n"
+    "ld1w { z11.s }, p2/Z, [x12, x28, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x11, x28, LSL #2]\n"
+    "ld1w { z13.s }, p2/Z, [x9, x28, LSL #2]\n"
+    "bge 2f\n"
+    "1:"  // Channel loop
+    "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
+    "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
+    "ldr x25, [x15, #0x30]\n"
+    "incw x26\n"
+    "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
+    "fmla z23.s, p3/M, z0.s, z10.s\n"
+    "ldr x24, [x15, #0x38]\n"
+    "mov p1.b, p2.b\n"
+    "fmla z24.s, p3/M, z4.s, z13.s\n"
+    "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
+    "ldr x23, [x15, #0x28]\n"
+    "whilelt p0.s, x10, %x[n_channels]\n"
+    "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
+    "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
+    "ldr x13, [x15, #0x48]\n"
+    "ld1w { z10.s }, p2/Z, [x13, x28, LSL #2]\n"
+    "fmla z25.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
+    "ldr x14, [x15, #0x40]\n"
+    "fmla z23.s, p3/M, z5.s, z13.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "ldr x12, [x15, #0x50]\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "fmla z25.s, p3/M, z3.s, z13.s\n"
+    "ldr x11, [x15, #0x58]\n"
+    "fmla z26.s, p3/M, z2.s, z13.s\n"
+    "fmla z27.s, p3/M, z1.s, z13.s\n"
+    "ldr x9, [x15, #0x60]\n"
+    "fmla z28.s, p3/M, z0.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "fmla z29.s, p3/M, z6.s, z12.s\n"
+    "ldr x25, [x15, #0x70]\n"
+    "ld1w { z12.s }, p2/Z, [x23, x28, LSL #2]\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "ldr x23, [x15, #0x68]\n"
+    "fmla z24.s, p3/M, z0.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "ldr x24, [x15, #0x78]\n"
+    "fmla z26.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z3.s, z11.s\n"
+    "ldr x14, [x15, #0x80]\n"
+    "ld1w { z18.s }, p3/Z, [x16]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z28.s, p3/M, z4.s, z10.s\n"
+    "ldr x13, [x15, #0x88]\n"
+    "addvl x16, x16, #1\n"
+    "fmla z29.s, p3/M, z1.s, z11.s\n"
+    "fmla z23.s, p3/M, z1.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x28, LSL #2]\n"
+    "ldr x12, [x15, #0x90]\n"
+    "fmla z24.s, p3/M, z2.s, z12.s\n"
+    "fmla z25.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z13.s }, p2/Z, [x11, x28, LSL #2]\n"
+    "ldr x11, [x15, #0x98]\n"
+    "ld1w { z12.s }, p2/Z, [x9, x28, LSL #2]\n"
+    "fmla z27.s, p3/M, z5.s, z10.s\n"
+    "fmla z30.s, p3/M, z2.s, z10.s\n"
+    "ldr x9, [x15, #0xa0]\n"
+    "fmla z26.s, p3/M, z0.s, z11.s\n"
+    "fmla z28.s, p3/M, z2.s, z13.s\n"
+    "ldr x22, [x27, #0x0]\n"
+    "fmla z24.s, p3/M, z8.s, z10.s\n"
+    "fmla z25.s, p3/M, z7.s, z10.s\n"
+    "ldr x21, [x27, #0x8]\n"
+    "fmla z31.s, p3/M, z1.s, z10.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "ld1w { z10.s }, p2/Z, [x23, x28, LSL #2]\n"
+    "ldr x23, [x15, #0xa8]\n"
+    "fmla z26.s, p3/M, z6.s, z12.s\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "ldr x14, [x15, #0xc0]\n"
+    "fmla z28.s, p3/M, z6.s, z10.s\n"
+    "fmla z30.s, p3/M, z4.s, z10.s\n"
+    "ldr x20, [x27, #0x10]\n"
+    "fmla z23.s, p3/M, z3.s, z11.s\n"
+    "fmla z25.s, p3/M, z5.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ldr x25, [x15, #0xb0]\n"
+    "fmla z29.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z3.s, z10.s\n"
+    "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "ldr x24, [x15, #0xb8]\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "fmla z28.s, p3/M, z8.s, z11.s\n"
+    "ldr x19, [x27, #0x18]\n"
+    "fmla z30.s, p3/M, z6.s, z13.s\n"
+    "fmla z24.s, p3/M, z3.s, z12.s\n"
+    "fmla z27.s, p3/M, z0.s, z12.s\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x28, LSL #2]\n"
+    "fmla z29.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x12, x28, LSL #2]\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "fmla z24.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x11, x28, LSL #2]\n"
+    "fmla z25.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z30.s, p3/M, z8.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x9, x28, LSL #2]\n"
+    "ldr x9, [x15, #0x20]\n"
+    "fmla z23.s, p3/M, z2.s, z11.s\n"
+    "fmla z26.s, p3/M, z7.s, z12.s\n"
+    "fmla z27.s, p3/M, z6.s, z12.s\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "fmla z30.s, p3/M, z3.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "fmla z31.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x23, x28, LSL #2]\n"
+    "fmla z23.s, p3/M, z6.s, z12.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "fmla z24.s, p3/M, z1.s, z11.s\n"
+    "fmla z25.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "fmax z23.s, p3/M, z23.s, z17.s\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z30.s, p3/M, z5.s, z13.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z27.s, p3/M, z8.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "ldp x14, x13, [x15, #0x0]\n"
+    "fmla z26.s, p3/M, z3.s, z12.s\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "ldp x12, x11, [x15, #0x10]\n"
+    "incw x28\n"
+    "fmin z23.s, p3/M, z23.s, z16.s\n"
+    "st1w { z23.s }, p1, [x22, x26, LSL #2]\n"
+    "ldr x22, [x27, #0x20]\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z13.s\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z9.s }, p0/Z, [x14, x10, LSL #2]\n"
+    "whilelt p2.s, x28, %x[n_channels]\n"
+    "fmla z31.s, p3/M, z6.s, z13.s\n"
+    ".inst 0xc1b0ca38  // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
+    "st1w { z24.s }, p1, [x21, x26, LSL #2]\n"
+    "ldr x21, [x27, #0x28]\n"
+    "st1w { z25.s }, p1, [x20, x26, LSL #2]\n"
+    "ldr x20, [x27, #0x30]\n"
+    "ld1w { z10.s }, p0/Z, [x13, x10, LSL #2]\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "st1w { z26.s }, p1, [x19, x26, LSL #2]\n"
+    "ldr x19, [x27, #0x38]\n"
+    "ld1w { z11.s }, p0/Z, [x12, x10, LSL #2]\n"
+    "st1w { z27.s }, p1, [x22, x26, LSL #2]\n"
+    "ldr x22, [x27, #0x40]\n"
+    "ld1w { z12.s }, p0/Z, [x11, x10, LSL #2]\n"
+    "ld1w { z13.s }, p0/Z, [x9, x10, LSL #2]\n"
+    "incw x10\n"
+    "cmp x10, %x[n_channels]\n"
+    "st1w { z28.s }, p1, [x21, x26, LSL #2]\n"
+    ".inst 0xa040c200  // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+    "addvl x16, x16, #4\n"
+    "st1w { z29.s }, p1, [x20, x26, LSL #2]\n"
+    ".inst 0xa040c204  // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+    "addvl x16, x16, #4\n"
+    "st1w { z30.s }, p1, [x19, x26, LSL #2]\n"
+    "st1w { z31.s }, p1, [x22, x26, LSL #2]\n"
+    "ld1w { z8.s }, p3/Z, [x16]\n"
+    "addvl x16, x16, #1\n"
+    "blt 1b\n"
+    "2:"  // Channel tail
+    "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
+    "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
+    "ldr x25, [x15, #0x30]\n"
+    "incw x26\n"
+    "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
+    "fmla z23.s, p3/M, z0.s, z10.s\n"
+    "ldr x24, [x15, #0x38]\n"
+    "mov p1.b, p2.b\n"
+    "fmla z24.s, p3/M, z4.s, z13.s\n"
+    "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
+    "ldr x23, [x15, #0x28]\n"
+    "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
+    "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
+    "ldr x13, [x15, #0x48]\n"
+    "ld1w { z10.s }, p2/Z, [x13, x28, LSL #2]\n"
+    "fmla z25.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
+    "ldr x14, [x15, #0x40]\n"
+    "fmla z23.s, p3/M, z5.s, z13.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "ldr x12, [x15, #0x50]\n"
+    "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "fmla z25.s, p3/M, z3.s, z13.s\n"
+    "ldr x11, [x15, #0x58]\n"
+    "fmla z26.s, p3/M, z2.s, z13.s\n"
+    "fmla z27.s, p3/M, z1.s, z13.s\n"
+    "ldr x9, [x15, #0x60]\n"
+    "fmla z28.s, p3/M, z0.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "fmla z29.s, p3/M, z6.s, z12.s\n"
+    "ldr x25, [x15, #0x70]\n"
+    "ld1w { z12.s }, p2/Z, [x23, x28, LSL #2]\n"
+    "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "ldr x23, [x15, #0x68]\n"
+    "fmla z24.s, p3/M, z0.s, z13.s\n"
+    "fmla z31.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "ldr x24, [x15, #0x78]\n"
+    "fmla z26.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z3.s, z11.s\n"
+    "ldr x14, [x15, #0x80]\n"
+    "fmla z30.s, p3/M, z0.s, z11.s\n"
+    "fmla z28.s, p3/M, z4.s, z10.s\n"
+    "ldr x13, [x15, #0x88]\n"
+    "fmla z29.s, p3/M, z1.s, z11.s\n"
+    "fmla z23.s, p3/M, z1.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x28, LSL #2]\n"
+    "ldr x12, [x15, #0x90]\n"
+    "fmla z24.s, p3/M, z2.s, z12.s\n"
+    "fmla z25.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z13.s }, p2/Z, [x11, x28, LSL #2]\n"
+    "ldr x11, [x15, #0x98]\n"
+    "ld1w { z12.s }, p2/Z, [x9, x28, LSL #2]\n"
+    "fmla z27.s, p3/M, z5.s, z10.s\n"
+    "fmla z30.s, p3/M, z2.s, z10.s\n"
+    "ldr x9, [x15, #0xa0]\n"
+    "fmla z26.s, p3/M, z0.s, z11.s\n"
+    "fmla z28.s, p3/M, z2.s, z13.s\n"
+    "ldr x22, [x27, #0x0]\n"
+    "fmla z24.s, p3/M, z8.s, z10.s\n"
+    "fmla z25.s, p3/M, z7.s, z10.s\n"
+    "ldr x21, [x27, #0x8]\n"
+    "fmla z31.s, p3/M, z1.s, z10.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "ld1w { z10.s }, p2/Z, [x23, x28, LSL #2]\n"
+    "ldr x23, [x15, #0xa8]\n"
+    "fmla z26.s, p3/M, z6.s, z12.s\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "ldr x14, [x15, #0xc0]\n"
+    "fmla z28.s, p3/M, z6.s, z10.s\n"
+    "fmla z30.s, p3/M, z4.s, z10.s\n"
+    "ldr x20, [x27, #0x10]\n"
+    "fmla z23.s, p3/M, z3.s, z11.s\n"
+    "fmla z25.s, p3/M, z5.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ldr x25, [x15, #0xb0]\n"
+    "fmla z29.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z3.s, z10.s\n"
+    "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "ldr x24, [x15, #0xb8]\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "fmla z28.s, p3/M, z8.s, z11.s\n"
+    "ldr x19, [x27, #0x18]\n"
+    "fmla z30.s, p3/M, z6.s, z13.s\n"
+    "fmla z24.s, p3/M, z3.s, z12.s\n"
+    "fmla z27.s, p3/M, z0.s, z12.s\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x28, LSL #2]\n"
+    "fmla z29.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x12, x28, LSL #2]\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "fmla z24.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x11, x28, LSL #2]\n"
+    "fmla z25.s, p3/M, z4.s, z11.s\n"
+    "fmla z27.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z30.s, p3/M, z8.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x9, x28, LSL #2]\n"
+    "fmla z23.s, p3/M, z2.s, z11.s\n"
+    "fmla z26.s, p3/M, z7.s, z12.s\n"
+    "fmla z27.s, p3/M, z6.s, z12.s\n"
+    "fmla z29.s, p3/M, z4.s, z12.s\n"
+    "fmla z30.s, p3/M, z3.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "fmla z31.s, p3/M, z7.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x23, x28, LSL #2]\n"
+    "fmla z23.s, p3/M, z6.s, z12.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "fmla z24.s, p3/M, z1.s, z11.s\n"
+    "fmla z25.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "fmax z23.s, p3/M, z23.s, z17.s\n"
+    "fmla z28.s, p3/M, z7.s, z13.s\n"
+    "fmla z30.s, p3/M, z5.s, z13.s\n"
+    "fmla z29.s, p3/M, z0.s, z12.s\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z27.s, p3/M, z8.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x28, LSL #2]\n"
+    "fmla z26.s, p3/M, z3.s, z12.s\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmin z23.s, p3/M, z23.s, z16.s\n"
+    "st1w { z23.s }, p1, [x22, x26, LSL #2]\n"
+    "ldr x22, [x27, #0x20]\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z13.s\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "fmla z31.s, p3/M, z6.s, z13.s\n"
+    ".inst 0xc1b0ca38  // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
+    "st1w { z24.s }, p1, [x21, x26, LSL #2]\n"
+    "ldr x21, [x27, #0x28]\n"
+    "st1w { z25.s }, p1, [x20, x26, LSL #2]\n"
+    "ldr x20, [x27, #0x30]\n"
+    ".inst 0xc1b0ca3c  // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+    "st1w { z26.s }, p1, [x19, x26, LSL #2]\n"
+    "ldr x19, [x27, #0x38]\n"
+    "st1w { z27.s }, p1, [x22, x26, LSL #2]\n"
+    "ldr x22, [x27, #0x40]\n"
+    "st1w { z28.s }, p1, [x21, x26, LSL #2]\n"
+    "st1w { z29.s }, p1, [x20, x26, LSL #2]\n"
+    "st1w { z30.s }, p1, [x19, x26, LSL #2]\n"
+    "st1w { z31.s }, p1, [x22, x26, LSL #2]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
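
Note: the indirect variant above receives its input as a flat table of 25 pointers, one per element of the 5x5 patch that a 3x3 output tile with a 3x3 kernel at stride 1 touches; the Args constructor then permutes that table into the order the assembly loads it in (its first entry, input_ptrs[12], is the centre of the patch). A minimal sketch of how such a table could be filled from a strided view is shown below; the helper and the row-major patch ordering are assumptions for illustration only, not something this patch defines:

    #include <cstddef>

    // Hypothetical helper: populate the 25-entry pointer table consumed by
    // sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(), assuming
    // the framework hands the patch over in row-major order (5 rows x 5 cols).
    static void fill_input_ptrs(const float *patch_base,
                                ptrdiff_t ld_row, ptrdiff_t ld_col,
                                const float *input_ptrs[25])
    {
        for (int i = 0; i < 5; i++)      // 5 = output_rows + kernel_rows - 1
        {
            for (int j = 0; j < 5; j++)  // 5 = output_cols + kernel_cols - 1
            {
                input_ptrs[i * 5 + j] = patch_base + i * ld_row + j * ld_col;
            }
        }
    }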
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp
new file mode 100644
index 0000000..9184cc0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(const float *const *const input_ptrs, float *const *const outptrs, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(const unsigned int n_tile_rows, const unsigned int n_tile_cols, const float *inptr, int64_t ld_input_row, int64_t ld_input_col, float *outptr, int64_t ld_output_row, int64_t ld_output_col, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+
+class sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float>
+{
+  private:
+  using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>;
+  Parent::IndirectKernelType m_indirect_kernel = sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl;
+  Parent::DirectKernelType m_direct_kernel = sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl;
+
+  public:
+  using return_type = float;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  constexpr static unsigned int kernel_rows = 3;
+  constexpr static unsigned int kernel_cols = 3;
+
+  constexpr static unsigned int stride_rows = 1;
+  constexpr static unsigned int stride_cols = 1;
+
+  constexpr static unsigned int output_rows = 4;
+  constexpr static unsigned int output_cols = 4;
+
+  sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *)
+  : Parent(output_rows, output_cols, kernel_rows, kernel_cols, stride_rows, stride_cols) {}
+
+  arm_gemm::VLType get_vl_type(void) const override { return vl_type; }
+
+  Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; }
+  Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
new file mode 100644
index 0000000..8ec7bcc
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -0,0 +1,672 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
+  const unsigned int n_tile_rows,
+  const unsigned int n_tile_cols,
+  const float *inptr,
+  int64_t ld_input_row,
+  int64_t ld_input_col,
+  float *outptr,
+  int64_t ld_output_row,
+  int64_t ld_output_col,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    const uint64_t n_tile_rows, n_tile_cols;
+    const float *inptr;
+    const uint64_t ld_input_row;
+    const uint64_t ld_input_col;
+    float *outptr;
+    const uint64_t ld_output_row;
+    const uint64_t ld_output_col;
+    const void *params;
+    const float min, max;
+
+    uint64_t tile_i = 0, tile_j = 0;
+
+    Args(
+      const unsigned int n_tile_rows,
+      const unsigned int n_tile_cols,
+      const float *inptr,
+      int64_t ld_input_row,
+      int64_t ld_input_col,
+      float *outptr,
+      int64_t ld_output_row,
+      int64_t ld_output_col,
+      const void *params,
+      const float activation_min,
+      const float activation_max
+    ) : n_tile_rows(n_tile_rows), n_tile_cols(n_tile_cols), inptr(inptr),
+        ld_input_row(ld_input_row), ld_input_col(ld_input_col), outptr(outptr),
+        ld_output_row(ld_output_row), ld_output_col(ld_output_col),
+        params(params), min(activation_min), max(activation_max)
+    {
+    }
+  };
+
+  Args params_struct(
+    n_tile_rows, n_tile_cols,
+    inptr, ld_input_row, ld_input_col,
+    outptr, ld_output_row, ld_output_col,
+    params, activation_min, activation_max
+  );
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ptrue p3.b\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "mov x2, #0x0\n"
+    "mov x3, #0x0\n"
+    "1:"  // Tile loop
+    "str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "mov x21, #0x4\n"
+    "str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+    "mul x19, x2, x20\n"  // offset = tile_i * ld_input_row
+    "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+    "madd x19, x3, x4, x19\n"  // offset += tile_j * ld_input_col
+    "mul x19, x19, x21\n"  // offset *= kernel_stride * output_size
+    "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+    "add x5, x5, x19, LSL #2\n"  // inptr[0] += offset * sizeof(float)
+    "add x6, x5, x20, LSL #2\n"
+    "add x7, x6, x20, LSL #2\n"
+    "add x8, x4, x4\n"
+    "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+    "add x16, x7, x20, LSL #2\n"
+    "add x15, x8, x4\n"
+    "add x14, x16, x20, LSL #2\n"
+    "add x13, x15, x4\n"
+    "add x12, x14, x20, LSL #2\n"
+    "add x11, x13, x4\n"
+    "cbnz x3, 2f\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "sub x20, x19, x3\n"
+    "sub x20, x20, #0x1\n"
+    "lsl x10, %x[n_channels], #0x2\n"
+    "mov x19, #0x10\n"
+    "and x20, x20, #0x3fffff\n"
+    "mul x19, x19, x4\n"
+    "orr x10, x10, x20, LSL #22\n"
+    "orr x10, x10, x19, LSL #38\n"
+    "add x26, x7, x8, LSL #2\n"
+    "add x25, x5, x11, LSL #2\n"
+    "add x24, x7, x15, LSL #2\n"
+    "add x23, x12, x11, LSL #2\n"
+    "add x22, x16, x8, LSL #2\n"
+    "add x21, x5, x4, LSL #2\n"
+    "add x20, x5, x13, LSL #2\n"
+    "add x19, x16, x15, LSL #2\n"
+    "add x9, x6, x11, LSL #2\n"
+    "add x28, x6, x8, LSL #2\n"
+    "add x27, x14, x11, LSL #2\n"
+    ".inst 0xf8aa4b5a  // rprfm pldonce, x26, [x10]\n"
+    "add x26, x6, x15, LSL #2\n"
+    ".inst 0xf8aa48ba  // rprfm pldonce, x5, [x10]\n"
+    ".inst 0xf8aa4b3a  // rprfm pldonce, x25, [x10]\n"
+    "add x25, x12, x4, LSL #2\n"
+    ".inst 0xf8aa4b1a  // rprfm pldonce, x24, [x10]\n"
+    "add x24, x7, x4, LSL #2\n"
+    ".inst 0xf8aa499a  // rprfm pldonce, x12, [x10]\n"
+    ".inst 0xf8aa4afa  // rprfm pldonce, x23, [x10]\n"
+    "add x23, x12, x13, LSL #2\n"
+    ".inst 0xf8aa4ada  // rprfm pldonce, x22, [x10]\n"
+    "add x22, x7, x13, LSL #2\n"
+    ".inst 0xf8aa4aba  // rprfm pldonce, x21, [x10]\n"
+    "add x21, x5, x8, LSL #2\n"
+    ".inst 0xf8aa4a9a  // rprfm pldonce, x20, [x10]\n"
+    "add x20, x16, x4, LSL #2\n"
+    ".inst 0xf8aa4a7a  // rprfm pldonce, x19, [x10]\n"
+    "add x19, x5, x15, LSL #2\n"
+    ".inst 0xf8aa48da  // rprfm pldonce, x6, [x10]\n"
+    ".inst 0xf8aa493a  // rprfm pldonce, x9, [x10]\n"
+    "add x9, x16, x13, LSL #2\n"
+    ".inst 0xf8aa49da  // rprfm pldonce, x14, [x10]\n"
+    ".inst 0xf8aa4b9a  // rprfm pldonce, x28, [x10]\n"
+    "add x28, x7, x11, LSL #2\n"
+    ".inst 0xf8aa4b7a  // rprfm pldonce, x27, [x10]\n"
+    "add x27, x14, x8, LSL #2\n"
+    ".inst 0xf8aa4b5a  // rprfm pldonce, x26, [x10]\n"
+    "add x26, x16, x11, LSL #2\n"
+    ".inst 0xf8aa4b3a  // rprfm pldonce, x25, [x10]\n"
+    "add x25, x12, x8, LSL #2\n"
+    ".inst 0xf8aa4b1a  // rprfm pldonce, x24, [x10]\n"
+    "add x24, x14, x15, LSL #2\n"
+    ".inst 0xf8aa4afa  // rprfm pldonce, x23, [x10]\n"
+    "add x23, x12, x15, LSL #2\n"
+    ".inst 0xf8aa4ada  // rprfm pldonce, x22, [x10]\n"
+    "add x22, x6, x4, LSL #2\n"
+    ".inst 0xf8aa4aba  // rprfm pldonce, x21, [x10]\n"
+    "add x21, x6, x13, LSL #2\n"
+    ".inst 0xf8aa4a9a  // rprfm pldonce, x20, [x10]\n"
+    "add x20, x14, x4, LSL #2\n"
+    ".inst 0xf8aa4a7a  // rprfm pldonce, x19, [x10]\n"
+    "add x19, x14, x13, LSL #2\n"
+    ".inst 0xf8aa48fa  // rprfm pldonce, x7, [x10]\n"
+    ".inst 0xf8aa493a  // rprfm pldonce, x9, [x10]\n"
+    ".inst 0xf8aa4b9a  // rprfm pldonce, x28, [x10]\n"
+    ".inst 0xf8aa4a1a  // rprfm pldonce, x16, [x10]\n"
+    ".inst 0xf8aa4b7a  // rprfm pldonce, x27, [x10]\n"
+    ".inst 0xf8aa4b5a  // rprfm pldonce, x26, [x10]\n"
+    ".inst 0xf8aa4b3a  // rprfm pldonce, x25, [x10]\n"
+    ".inst 0xf8aa4b1a  // rprfm pldonce, x24, [x10]\n"
+    ".inst 0xf8aa4afa  // rprfm pldonce, x23, [x10]\n"
+    ".inst 0xf8aa4ada  // rprfm pldonce, x22, [x10]\n"
+    ".inst 0xf8aa4aba  // rprfm pldonce, x21, [x10]\n"
+    ".inst 0xf8aa4a9a  // rprfm pldonce, x20, [x10]\n"
+    ".inst 0xf8aa4a7a  // rprfm pldonce, x19, [x10]\n"
+    "2:"  // Tile loop: Prefetch input rows: End
+    "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+    "mul x20, x2, x21\n"  // offset = tile_i * ld_output_row
+    "mov x19, #0x4\n"
+    "ld1w { z15.s }, p3/Z, [x17]\n"
+    "ldr x28, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+    "madd x20, x3, x28, x20\n"  // offset += tile_j * ld_output_col
+    "mul x20, x20, x19\n"  // offset *= output_tile_size
+    "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "ldr x27, [%x[params_struct], %[offsetof_args_outptr]]\n"
+    "add x27, x27, x20, LSL #2\n"  // outptrs[0] += offset * sizeof(float)
+    "addvl x17, x17, #1\n"
+    ".inst 0xa040c220  // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+    "add x26, x27, x21, LSL #2\n"
+    "cntw x25\n"
+    "ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "addvl x17, x17, #4\n"
+    "add x24, x26, x21, LSL #2\n"
+    ".inst 0xa040c224  // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+    "add x23, x28, x28\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    "ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
+    "addvl x17, x17, #4\n"
+    "cmp x25, %x[n_channels]\n"
+    "ld1w { z8.s }, p3/Z, [x17]\n"
+    "add x22, x24, x21, LSL #2\n"
+    "add x21, x23, x28\n"
+    "ld1w { z10.s }, p2/Z, [x5]\n"
+    "mov x20, #0x0\n"
+    "sub x19, XZR, x25\n"
+    "ld1w { z11.s }, p2/Z, [x5, x11, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
+    "addvl x17, x17, #1\n"
+    "bge 4f\n"
+    "3:"  // Tile loop: Channel loop
+    "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
+    "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
+    "whilelt p1.s, x25, %x[n_channels]\n"
+    "incw x20\n"
+    "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
+    "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
+    "incw x25\n"
+    "mov p0.b, p2.b\n"
+    "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
+    "fmla z21.s, p3/M, z5.s, z12.s\n"
+    "incw x19\n"
+    "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
+    "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
+    "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
+    "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
+    "fmla z16.s, p3/M, z0.s, z10.s\n"
+    "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x12]\n"
+    "fmla z22.s, p3/M, z4.s, z12.s\n"
+    "fmla z25.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x11, LSL #2]\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "fmla z21.s, p3/M, z7.s, z9.s\n"
+    "fmla z17.s, p3/M, z8.s, z12.s\n"
+    "fmla z18.s, p3/M, z7.s, z12.s\n"
+    "fmla z19.s, p3/M, z6.s, z12.s\n"
+    "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
+    "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x5, x4, LSL #2]\n"
+    "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
+    "fmla z22.s, p3/M, z6.s, z9.s\n"
+    "ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
+    "fmla z25.s, p3/M, z4.s, z9.s\n"
+    "fmla z26.s, p3/M, z3.s, z9.s\n"
+    "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+    "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z15.s }, p3/Z, [x17]\n"
+    "addvl x17, x17, #1\n"
+    "fmla z20.s, p3/M, z8.s, z9.s\n"
+    "fmla z24.s, p3/M, z5.s, z9.s\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z21.s, p3/M, z8.s, z10.s\n"
+    "ld1w { z9.s }, p2/Z, [x6]\n"
+    "fmla z16.s, p3/M, z1.s, z12.s\n"
+    "fmla z17.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x11, LSL #2]\n"
+    "fmla z18.s, p3/M, z2.s, z11.s\n"
+    "fmla z19.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14]\n"
+    "fmla z22.s, p3/M, z7.s, z10.s\n"
+    "fmla z23.s, p3/M, z6.s, z10.s\n"
+    "fmla z25.s, p3/M, z5.s, z10.s\n"
+    "fmla z26.s, p3/M, z4.s, z10.s\n"
+    "fmla z27.s, p3/M, z3.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z10.s\n"
+    "fmla z30.s, p3/M, z1.s, z10.s\n"
+    "fmla z31.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x6, x8, LSL #2]\n"
+    "fmla z20.s, p3/M, z0.s, z9.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "fmla z21.s, p3/M, z1.s, z10.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x11, LSL #2]\n"
+    "fmla z16.s, p3/M, z3.s, z9.s\n"
+    "fmla z19.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z2.s, z12.s\n"
+    "fmla z17.s, p3/M, z4.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x15, LSL #2]\n"
+    "fmla z18.s, p3/M, z3.s, z10.s\n"
+    "fmla z22.s, p3/M, z0.s, z10.s\n"
+    "fmla z27.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x4, LSL #2]\n"
+    "fmla z20.s, p3/M, z2.s, z10.s\n"
+    "fmla z21.s, p3/M, z2.s, z12.s\n"
+    "fmla z16.s, p3/M, z5.s, z10.s\n"
+    "fmla z17.s, p3/M, z5.s, z12.s\n"
+    "ld1w { z10.s }, p2/Z, [x7, x4, LSL #2]\n"
+    "fmla z18.s, p3/M, z4.s, z12.s\n"
+    "fmla z19.s, p3/M, z3.s, z12.s\n"
+    "fmla z22.s, p3/M, z1.s, z12.s\n"
+    "fmla z23.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x7, x13, LSL #2]\n"
+    "fmla z28.s, p3/M, z7.s, z11.s\n"
+    "fmla z29.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x13, LSL #2]\n"
+    "fmla z20.s, p3/M, z4.s, z10.s\n"
+    "fmla z21.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z1.s, z10.s\n"
+    "fmla z25.s, p3/M, z0.s, z10.s\n"
+    "fmla z16.s, p3/M, z7.s, z10.s\n"
+    "fmla z17.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x5, x8, LSL #2]\n"
+    "fmla z30.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x4, LSL #2]\n"
+    "fmla z18.s, p3/M, z8.s, z12.s\n"
+    "fmla z19.s, p3/M, z7.s, z12.s\n"
+    "fmla z22.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z26.s, p3/M, z2.s, z12.s\n"
+    "fmla z27.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x5, x15, LSL #2]\n"
+    "addvl x5, x5, #1\n"
+    "fmla z20.s, p3/M, z7.s, z11.s\n"
+    "fmla z21.s, p3/M, z6.s, z11.s\n"
+    "fmla z24.s, p3/M, z4.s, z11.s\n"
+    "fmla z25.s, p3/M, z3.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x13, LSL #2]\n"
+    "fmla z16.s, p3/M, z2.s, z10.s\n"
+    "fmla z17.s, p3/M, z1.s, z10.s\n"
+    "fmla z18.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x7]\n"
+    "fmla z30.s, p3/M, z2.s, z11.s\n"
+    "fmla z19.s, p3/M, z0.s, z12.s\n"
+    "fmla z20.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z0.s, z10.s\n"
+    "fmla z22.s, p3/M, z8.s, z11.s\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z5.s, z11.s\n"
+    "fmla z27.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x8, LSL #2]\n"
+    "fmla z17.s, p3/M, z2.s, z12.s\n"
+    "fmla z18.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x7, x11, LSL #2]\n"
+    "addvl x7, x7, #1\n"
+    "fmla z16.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x16]\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "fmla z30.s, p3/M, z3.s, z11.s\n"
+    "fmla z19.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
+    "fmla z23.s, p3/M, z5.s, z12.s\n"
+    "fmla z27.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x16, x11, LSL #2]\n"
+    "addvl x16, x16, #1\n"
+    "fmla z20.s, p3/M, z6.s, z10.s\n"
+    "fmla z24.s, p3/M, z3.s, z10.s\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x12, x8, LSL #2]\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "fmla z30.s, p3/M, z6.s, z10.s\n"
+    "fmla z24.s, p3/M, z8.s, z11.s\n"
+    "fmla z25.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x15, LSL #2]\n"
+    "fmla z27.s, p3/M, z5.s, z12.s\n"
+    "fmla z29.s, p3/M, z5.s, z11.s\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z3.s, z11.s\n"
+    "fmla z23.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x12, x15, LSL #2]\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "addvl x12, x12, #1\n"
+    "ld1w { z10.s }, p2/Z, [x6, x4, LSL #2]\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmla z26.s, p3/M, z7.s, z11.s\n"
+    "fmla z27.s, p3/M, z6.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z11.s }, p2/Z, [x6, x13, LSL #2]\n"
+    "addvl x6, x6, #1\n"
+    "fmla z30.s, p3/M, z7.s, z12.s\n"
+    "fmla z31.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x4, LSL #2]\n"
+    "fmla z16.s, p3/M, z4.s, z10.s\n"
+    "fmla z17.s, p3/M, z3.s, z10.s\n"
+    "fmla z20.s, p3/M, z1.s, z10.s\n"
+    "fmla z21.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, x13, LSL #2]\n"
+    "whilelt p2.s, x20, %x[n_channels]\n"
+    "fmla z18.s, p3/M, z5.s, z11.s\n"
+    "fmla z19.s, p3/M, z4.s, z11.s\n"
+    "cmp x25, %x[n_channels]\n"
+    "addvl x14, x14, #1\n"
+    "fmla z22.s, p3/M, z2.s, z11.s\n"
+    "fmla z23.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p1/Z, [x5, x11, LSL #2]\n"
+    "fmla z24.s, p3/M, z7.s, z12.s\n"
+    "fmla z25.s, p3/M, z6.s, z12.s\n"
+    "fmla z28.s, p3/M, z4.s, z12.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    ".inst 0xa040c220  // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+    "addvl x17, x17, #4\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    "ld1w { z12.s }, p1/Z, [x7, x15, LSL #2]\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    ".inst 0xa040c224  // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+    "addvl x17, x17, #4\n"
+    ".inst 0xc1adc9d0  // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
+    ".inst 0xc1adc9d4  // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
+    "ld1w { z10.s }, p1/Z, [x5]\n"
+    ".inst 0xc1adc9d8  // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
+    ".inst 0xc1adc9dc  // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
+    "st1w { z16.s }, p0, [x27]\n"
+    "st1w { z17.s }, p0, [x27, x28, LSL #2]\n"
+    "ld1w { z8.s }, p3/Z, [x17]\n"
+    "addvl x17, x17, #1\n"
+    "st1w { z18.s }, p0, [x27, x23, LSL #2]\n"
+    "st1w { z19.s }, p0, [x27, x21, LSL #2]\n"
+    "addvl x27, x27, #1\n"
+    "st1w { z20.s }, p0, [x26]\n"
+    "st1w { z21.s }, p0, [x26, x28, LSL #2]\n"
+    "st1w { z22.s }, p0, [x26, x23, LSL #2]\n"
+    "st1w { z23.s }, p0, [x26, x21, LSL #2]\n"
+    "addvl x26, x26, #1\n"
+    "st1w { z24.s }, p0, [x24]\n"
+    "st1w { z25.s }, p0, [x24, x28, LSL #2]\n"
+    "st1w { z26.s }, p0, [x24, x23, LSL #2]\n"
+    "st1w { z27.s }, p0, [x24, x21, LSL #2]\n"
+    "addvl x24, x24, #1\n"
+    "st1w { z28.s }, p0, [x22]\n"
+    "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+    "st1w { z30.s }, p0, [x22, x23, LSL #2]\n"
+    "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
+    "addvl x22, x22, #1\n"
+    "blt 3b\n"
+    "4:"  // Tile loop: Channel tail
+    "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
+    "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
+    "ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "add x3, x3, #0x1\n"
+    "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
+    "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
+    "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "add x20, x2, #0x1\n"
+    "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
+    "fmla z21.s, p3/M, z5.s, z12.s\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "cmp x3, x19\n"
+    "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
+    "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+    "csel x2, x2, x20, LT\n"
+    "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
+    "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
+    "mov p0.b, p2.b\n"
+    "fmla z16.s, p3/M, z0.s, z10.s\n"
+    "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x12]\n"
+    "csel x3, x3, XZR, LT\n"
+    "fmla z22.s, p3/M, z4.s, z12.s\n"
+    "fmla z25.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x11, LSL #2]\n"
+    "cmp x2, x19\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "fmla z21.s, p3/M, z7.s, z9.s\n"
+    "fmla z17.s, p3/M, z8.s, z12.s\n"
+    "fmla z18.s, p3/M, z7.s, z12.s\n"
+    "fmla z19.s, p3/M, z6.s, z12.s\n"
+    "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
+    "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x5, x4, LSL #2]\n"
+    "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
+    "fmla z22.s, p3/M, z6.s, z9.s\n"
+    "ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
+    "fmla z25.s, p3/M, z4.s, z9.s\n"
+    "fmla z26.s, p3/M, z3.s, z9.s\n"
+    "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+    "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+    "fmla z20.s, p3/M, z8.s, z9.s\n"
+    "fmla z24.s, p3/M, z5.s, z9.s\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z21.s, p3/M, z8.s, z10.s\n"
+    "ld1w { z9.s }, p2/Z, [x6]\n"
+    "fmla z16.s, p3/M, z1.s, z12.s\n"
+    "fmla z17.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x11, LSL #2]\n"
+    "fmla z18.s, p3/M, z2.s, z11.s\n"
+    "fmla z19.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14]\n"
+    "fmla z22.s, p3/M, z7.s, z10.s\n"
+    "fmla z23.s, p3/M, z6.s, z10.s\n"
+    "fmla z25.s, p3/M, z5.s, z10.s\n"
+    "fmla z26.s, p3/M, z4.s, z10.s\n"
+    "fmla z27.s, p3/M, z3.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z10.s\n"
+    "fmla z30.s, p3/M, z1.s, z10.s\n"
+    "fmla z31.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x6, x8, LSL #2]\n"
+    "fmla z20.s, p3/M, z0.s, z9.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "fmla z21.s, p3/M, z1.s, z10.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x11, LSL #2]\n"
+    "fmla z16.s, p3/M, z3.s, z9.s\n"
+    "fmla z19.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z2.s, z12.s\n"
+    "fmla z17.s, p3/M, z4.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x15, LSL #2]\n"
+    "fmla z18.s, p3/M, z3.s, z10.s\n"
+    "fmla z22.s, p3/M, z0.s, z10.s\n"
+    "fmla z27.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x4, LSL #2]\n"
+    "fmla z20.s, p3/M, z2.s, z10.s\n"
+    "fmla z21.s, p3/M, z2.s, z12.s\n"
+    "fmla z16.s, p3/M, z5.s, z10.s\n"
+    "fmla z17.s, p3/M, z5.s, z12.s\n"
+    "ld1w { z10.s }, p2/Z, [x7, x4, LSL #2]\n"
+    "fmla z18.s, p3/M, z4.s, z12.s\n"
+    "fmla z19.s, p3/M, z3.s, z12.s\n"
+    "fmla z22.s, p3/M, z1.s, z12.s\n"
+    "fmla z23.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x7, x13, LSL #2]\n"
+    "fmla z28.s, p3/M, z7.s, z11.s\n"
+    "fmla z29.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x13, LSL #2]\n"
+    "fmla z20.s, p3/M, z4.s, z10.s\n"
+    "fmla z21.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z1.s, z10.s\n"
+    "fmla z25.s, p3/M, z0.s, z10.s\n"
+    "fmla z16.s, p3/M, z7.s, z10.s\n"
+    "fmla z17.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x5, x8, LSL #2]\n"
+    "fmla z30.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x4, LSL #2]\n"
+    "fmla z18.s, p3/M, z8.s, z12.s\n"
+    "fmla z19.s, p3/M, z7.s, z12.s\n"
+    "fmla z22.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z26.s, p3/M, z2.s, z12.s\n"
+    "fmla z27.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x5, x15, LSL #2]\n"
+    "fmla z20.s, p3/M, z7.s, z11.s\n"
+    "fmla z21.s, p3/M, z6.s, z11.s\n"
+    "fmla z24.s, p3/M, z4.s, z11.s\n"
+    "fmla z25.s, p3/M, z3.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x13, LSL #2]\n"
+    "fmla z16.s, p3/M, z2.s, z10.s\n"
+    "fmla z17.s, p3/M, z1.s, z10.s\n"
+    "fmla z18.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x7]\n"
+    "fmla z30.s, p3/M, z2.s, z11.s\n"
+    "fmla z19.s, p3/M, z0.s, z12.s\n"
+    "fmla z20.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z0.s, z10.s\n"
+    "fmla z22.s, p3/M, z8.s, z11.s\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z5.s, z11.s\n"
+    "fmla z27.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x8, LSL #2]\n"
+    "fmla z17.s, p3/M, z2.s, z12.s\n"
+    "fmla z18.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x7, x11, LSL #2]\n"
+    "fmla z16.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x16]\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "fmla z30.s, p3/M, z3.s, z11.s\n"
+    "fmla z19.s, p3/M, z8.s, z12.s\n"
+    "fmla z23.s, p3/M, z5.s, z12.s\n"
+    "fmla z27.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x16, x11, LSL #2]\n"
+    "fmla z20.s, p3/M, z6.s, z10.s\n"
+    "fmla z24.s, p3/M, z3.s, z10.s\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x12, x8, LSL #2]\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "fmla z30.s, p3/M, z6.s, z10.s\n"
+    "fmla z24.s, p3/M, z8.s, z11.s\n"
+    "fmla z25.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x15, LSL #2]\n"
+    "fmla z27.s, p3/M, z5.s, z12.s\n"
+    "fmla z29.s, p3/M, z5.s, z11.s\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z3.s, z11.s\n"
+    "fmla z23.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x12, x15, LSL #2]\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x6, x4, LSL #2]\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmla z26.s, p3/M, z7.s, z11.s\n"
+    "fmla z27.s, p3/M, z6.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z11.s }, p2/Z, [x6, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z7.s, z12.s\n"
+    "fmla z31.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x14, x4, LSL #2]\n"
+    "fmla z16.s, p3/M, z4.s, z10.s\n"
+    "fmla z17.s, p3/M, z3.s, z10.s\n"
+    "fmla z20.s, p3/M, z1.s, z10.s\n"
+    "fmla z21.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, x13, LSL #2]\n"
+    "fmla z18.s, p3/M, z5.s, z11.s\n"
+    "fmla z19.s, p3/M, z4.s, z11.s\n"
+    "fmla z22.s, p3/M, z2.s, z11.s\n"
+    "fmla z23.s, p3/M, z1.s, z11.s\n"
+    "fmla z24.s, p3/M, z7.s, z12.s\n"
+    "fmla z25.s, p3/M, z6.s, z12.s\n"
+    "fmla z28.s, p3/M, z4.s, z12.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    ".inst 0xc1adc9d0  // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
+    ".inst 0xc1adc9d4  // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
+    "st1w { z16.s }, p0, [x27]\n"
+    ".inst 0xc1adc9d8  // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
+    ".inst 0xc1adc9dc  // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
+    "st1w { z17.s }, p0, [x27, x28, LSL #2]\n"
+    "st1w { z18.s }, p0, [x27, x23, LSL #2]\n"
+    "st1w { z19.s }, p0, [x27, x21, LSL #2]\n"
+    "st1w { z20.s }, p0, [x26]\n"
+    "st1w { z21.s }, p0, [x26, x28, LSL #2]\n"
+    "st1w { z22.s }, p0, [x26, x23, LSL #2]\n"
+    "st1w { z23.s }, p0, [x26, x21, LSL #2]\n"
+    "st1w { z24.s }, p0, [x24]\n"
+    "st1w { z25.s }, p0, [x24, x28, LSL #2]\n"
+    "st1w { z26.s }, p0, [x24, x23, LSL #2]\n"
+    "st1w { z27.s }, p0, [x24, x21, LSL #2]\n"
+    "st1w { z28.s }, p0, [x22]\n"
+    "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+    "st1w { z30.s }, p0, [x22, x23, LSL #2]\n"
+    "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
+    "blt 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
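
The tile loop in the kernel above starts by turning the (tile_i, tile_j) coordinates into input and output base pointers, exactly as the inline comments describe. A minimal scalar sketch of that preamble arithmetic, with an illustrative helper name that is not part of the library:

    #include <cstdint>

    // offset  = tile_i * ld_input_row
    // offset += tile_j * ld_input_col
    // offset *= kernel_stride * output_size   // == 4 for this stride-1, 4x4-output kernel
    // inptr  += offset * sizeof(float)        // the "LSL #2" in the assembly
    static inline const float *tile_input_base(const float *inptr,
                                                uint64_t tile_i, uint64_t tile_j,
                                                int64_t ld_input_row, int64_t ld_input_col)
    {
        const int64_t offset = (static_cast<int64_t>(tile_i) * ld_input_row +
                                static_cast<int64_t>(tile_j) * ld_input_col) * 4;
        return inptr + offset;  // float* arithmetic already applies the sizeof(float) scaling
    }

The output base pointer is derived the same way from ld_output_row/ld_output_col with a multiplier of 4 (the output tile size). Each tile then reads a 6x6 input patch (4 output rows/columns + 3 kernel rows/columns - 1), which is why the indirect variant of this kernel, added in the next file, takes 36 input pointers.
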
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
new file mode 100644
index 0000000..d99ebb2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
+  const float *const *const input_ptrs,
+  float *const *const outptrs,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    float *const *outptrs;
+    const void *params;
+    const float min, max;
+    const float *inptrs[36];
+
+    Args(
+      const float *const *const input_ptrs,
+      float *const *const outptrs,
+      const void *const params,
+      const float min,
+      const float max
+    ) : outptrs(outptrs), params(params), min(min), max(max)
+    {
+      inptrs[0] = input_ptrs[14];
+      inptrs[1] = input_ptrs[0];
+      inptrs[2] = input_ptrs[5];
+      inptrs[3] = input_ptrs[15];
+      inptrs[4] = input_ptrs[30];
+      inptrs[5] = input_ptrs[35];
+      inptrs[6] = input_ptrs[20];
+      inptrs[7] = input_ptrs[1];
+      inptrs[8] = input_ptrs[4];
+      inptrs[9] = input_ptrs[21];
+      inptrs[10] = input_ptrs[6];
+      inptrs[11] = input_ptrs[11];
+      inptrs[12] = input_ptrs[24];
+      inptrs[13] = input_ptrs[8];
+      inptrs[14] = input_ptrs[29];
+      inptrs[15] = input_ptrs[9];
+      inptrs[16] = input_ptrs[31];
+      inptrs[17] = input_ptrs[13];
+      inptrs[18] = input_ptrs[34];
+      inptrs[19] = input_ptrs[16];
+      inptrs[20] = input_ptrs[2];
+      inptrs[21] = input_ptrs[19];
+      inptrs[22] = input_ptrs[3];
+      inptrs[23] = input_ptrs[12];
+      inptrs[24] = input_ptrs[22];
+      inptrs[25] = input_ptrs[17];
+      inptrs[26] = input_ptrs[18];
+      inptrs[27] = input_ptrs[26];
+      inptrs[28] = input_ptrs[23];
+      inptrs[29] = input_ptrs[32];
+      inptrs[30] = input_ptrs[27];
+      inptrs[31] = input_ptrs[33];
+      inptrs[32] = input_ptrs[7];
+      inptrs[33] = input_ptrs[10];
+      inptrs[34] = input_ptrs[25];
+      inptrs[35] = input_ptrs[28];
+    }
+  };
+
+  Args params_struct(input_ptrs, outptrs, params,
+                     activation_min, activation_max);
+
+  __asm__ __volatile__(
+    "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
+    "ptrue p3.b\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "ld1w { z15.s }, p3/Z, [x16]\n"
+    "addvl x16, x16, #1\n"
+    "ldp x14, x13, [x15, #0x0]\n"
+    "ldp x12, x11, [x15, #0x10]\n"
+    "cntw x10\n"
+    ".inst 0xa040c200  // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+    "addvl x16, x16, #4\n"
+    "mov x9, #0x0\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    ".inst 0xa040c204  // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+    "ldr x28, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+    "addvl x16, x16, #4\n"
+    "cmp x10, %x[n_channels]\n"
+    "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "sub x27, XZR, x10\n"
+    "ld1w { z8.s }, p3/Z, [x16]\n"
+    "addvl x16, x16, #1\n"
+    "ld1w { z9.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "bge 2f\n"
+    "1:"  // Channel loop
+    "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
+    "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
+    "ldr x26, [x15, #0x20]\n"
+    "incw x27\n"
+    "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
+    "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
+    "ldr x25, [x15, #0x30]\n"
+    "mov p1.b, p2.b\n"
+    "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
+    "ldr x24, [x15, #0x28]\n"
+    "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
+    "whilelt p0.s, x10, %x[n_channels]\n"
+    "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
+    "fmla z21.s, p3/M, z5.s, z12.s\n"
+    "ldr x23, [x15, #0x38]\n"
+    "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
+    "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "ldr x14, [x15, #0x40]\n"
+    "fmla z16.s, p3/M, z0.s, z10.s\n"
+    "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "ldr x13, [x15, #0x48]\n"
+    "fmla z22.s, p3/M, z4.s, z12.s\n"
+    "fmla z25.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "ldr x12, [x15, #0x50]\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "fmla z17.s, p3/M, z8.s, z12.s\n"
+    "ldr x26, [x15, #0x60]\n"
+    "fmla z18.s, p3/M, z7.s, z12.s\n"
+    "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldr x24, [x15, #0x68]\n"
+    "fmla z21.s, p3/M, z7.s, z9.s\n"
+    "fmla z19.s, p3/M, z6.s, z12.s\n"
+    "ldr x11, [x15, #0x58]\n"
+    "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
+    "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "ldr x25, [x15, #0x70]\n"
+    "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
+    "fmla z22.s, p3/M, z6.s, z9.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "ldr x23, [x15, #0x78]\n"
+    "fmla z25.s, p3/M, z4.s, z9.s\n"
+    "fmla z26.s, p3/M, z3.s, z9.s\n"
+    "ldr x14, [x15, #0x80]\n"
+    "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+    "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+    "ldr x13, [x15, #0x88]\n"
+    "ld1w { z15.s }, p3/Z, [x16]\n"
+    "fmla z20.s, p3/M, z8.s, z9.s\n"
+    "fmla z24.s, p3/M, z5.s, z9.s\n"
+    "ldr x22, [x28, #0x0]\n"
+    "addvl x16, x16, #1\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z16.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z9.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ldr x12, [x15, #0x90]\n"
+    "fmla z17.s, p3/M, z0.s, z12.s\n"
+    "fmla z18.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "ldr x11, [x15, #0x98]\n"
+    "fmla z21.s, p3/M, z8.s, z10.s\n"
+    "fmla z19.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "ldr x26, [x15, #0xa0]\n"
+    "fmla z22.s, p3/M, z7.s, z10.s\n"
+    "fmla z23.s, p3/M, z6.s, z10.s\n"
+    "ldr x21, [x28, #0x8]\n"
+    "fmla z25.s, p3/M, z5.s, z10.s\n"
+    "fmla z26.s, p3/M, z4.s, z10.s\n"
+    "ldr x20, [x28, #0x10]\n"
+    "fmla z27.s, p3/M, z3.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z10.s\n"
+    "ldr x19, [x28, #0x18]\n"
+    "fmla z30.s, p3/M, z1.s, z10.s\n"
+    "fmla z31.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "ldr x24, [x15, #0xa8]\n"
+    "fmla z16.s, p3/M, z3.s, z9.s\n"
+    "fmla z20.s, p3/M, z0.s, z9.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "ldr x25, [x15, #0xb0]\n"
+    "fmla z17.s, p3/M, z4.s, z10.s\n"
+    "fmla z18.s, p3/M, z3.s, z10.s\n"
+    "fmla z21.s, p3/M, z1.s, z10.s\n"
+    "fmla z19.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z2.s, z12.s\n"
+    "fmla z22.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "ldr x23, [x15, #0xb8]\n"
+    "fmla z27.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "ldr x14, [x15, #0xc0]\n"
+    "fmla z16.s, p3/M, z5.s, z10.s\n"
+    "fmla z20.s, p3/M, z2.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldr x13, [x15, #0xc8]\n"
+    "fmla z17.s, p3/M, z5.s, z12.s\n"
+    "fmla z18.s, p3/M, z4.s, z12.s\n"
+    "fmla z21.s, p3/M, z2.s, z12.s\n"
+    "fmla z19.s, p3/M, z3.s, z12.s\n"
+    "fmla z22.s, p3/M, z1.s, z12.s\n"
+    "fmla z23.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "ldr x11, [x15, #0xd8]\n"
+    "fmla z28.s, p3/M, z7.s, z11.s\n"
+    "fmla z29.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ldr x12, [x15, #0xd0]\n"
+    "fmla z16.s, p3/M, z7.s, z10.s\n"
+    "fmla z17.s, p3/M, z6.s, z10.s\n"
+    "fmla z20.s, p3/M, z4.s, z10.s\n"
+    "fmla z21.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z1.s, z10.s\n"
+    "fmla z25.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "ldr x26, [x15, #0xe0]\n"
+    "fmla z18.s, p3/M, z8.s, z12.s\n"
+    "fmla z30.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z1.s, z12.s\n"
+    "ldr x24, [x15, #0xe8]\n"
+    "fmla z19.s, p3/M, z7.s, z12.s\n"
+    "fmla z22.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z26.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "ldr x25, [x15, #0xf0]\n"
+    "fmla z16.s, p3/M, z2.s, z10.s\n"
+    "fmla z17.s, p3/M, z1.s, z10.s\n"
+    "fmla z18.s, p3/M, z0.s, z10.s\n"
+    "fmla z20.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "ldr x23, [x15, #0xf8]\n"
+    "fmla z21.s, p3/M, z6.s, z11.s\n"
+    "fmla z24.s, p3/M, z4.s, z11.s\n"
+    "fmla z25.s, p3/M, z3.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z4.s, z11.s\n"
+    "ldr x14, [x15, #0x100]\n"
+    "fmla z30.s, p3/M, z2.s, z11.s\n"
+    "fmla z17.s, p3/M, z2.s, z12.s\n"
+    "fmla z18.s, p3/M, z1.s, z12.s\n"
+    "fmla z19.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldr x13, [x15, #0x108]\n"
+    "fmla z16.s, p3/M, z6.s, z10.s\n"
+    "fmla z20.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z0.s, z10.s\n"
+    "fmla z22.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ldr x12, [x15, #0x110]\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z5.s, z11.s\n"
+    "fmla z31.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z2.s, z12.s\n"
+    "ldr x11, [x15, #0x118]\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "fmla z30.s, p3/M, z3.s, z11.s\n"
+    "fmla z19.s, p3/M, z8.s, z12.s\n"
+    "fmla z23.s, p3/M, z5.s, z12.s\n"
+    "fmla z20.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "fmla z24.s, p3/M, z3.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "fmla z25.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "fmla z27.s, p3/M, z5.s, z12.s\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "fmla z30.s, p3/M, z6.s, z10.s\n"
+    "fmla z24.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmla z26.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z6.s, z11.s\n"
+    "fmla z29.s, p3/M, z5.s, z11.s\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldp x14, x13, [x15, #0x0]\n"
+    "fmla z23.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "fmla z16.s, p3/M, z4.s, z10.s\n"
+    "fmla z17.s, p3/M, z3.s, z10.s\n"
+    "fmla z18.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z9.s }, p0/Z, [x14, x10, LSL #2]\n"
+    "fmla z19.s, p3/M, z4.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z12.s\n"
+    "fmla z30.s, p3/M, z7.s, z12.s\n"
+    "fmla z31.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "fmla z20.s, p3/M, z1.s, z10.s\n"
+    "fmla z21.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "ldp x12, x11, [x15, #0x10]\n"
+    "fmla z22.s, p3/M, z2.s, z11.s\n"
+    "fmla z23.s, p3/M, z1.s, z11.s\n"
+    "incw x9\n"
+    "ld1w { z11.s }, p0/Z, [x12, x10, LSL #2]\n"
+    ".inst 0xc1adc9d0  // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
+    "st1w { z16.s }, p1, [x22, x27, LSL #2]\n"
+    "ldr x22, [x28, #0x20]\n"
+    "fmla z24.s, p3/M, z7.s, z12.s\n"
+    "st1w { z17.s }, p1, [x21, x27, LSL #2]\n"
+    "ldr x21, [x28, #0x28]\n"
+    "fmla z25.s, p3/M, z6.s, z12.s\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "st1w { z18.s }, p1, [x20, x27, LSL #2]\n"
+    "ldr x20, [x28, #0x30]\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    ".inst 0xc1adc9d4  // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
+    "st1w { z19.s }, p1, [x19, x27, LSL #2]\n"
+    "ldr x19, [x28, #0x38]\n"
+    "fmla z28.s, p3/M, z4.s, z12.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "st1w { z20.s }, p1, [x22, x27, LSL #2]\n"
+    "ldr x22, [x28, #0x40]\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    "st1w { z21.s }, p1, [x21, x27, LSL #2]\n"
+    "ldr x21, [x28, #0x48]\n"
+    ".inst 0xc1adc9d8  // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
+    "ld1w { z10.s }, p0/Z, [x13, x10, LSL #2]\n"
+    "st1w { z22.s }, p1, [x20, x27, LSL #2]\n"
+    "ldr x20, [x28, #0x50]\n"
+    "ld1w { z12.s }, p0/Z, [x11, x10, LSL #2]\n"
+    "incw x10\n"
+    "st1w { z23.s }, p1, [x19, x27, LSL #2]\n"
+    "ldr x19, [x28, #0x58]\n"
+    ".inst 0xa040c200  // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+    "addvl x16, x16, #4\n"
+    "st1w { z24.s }, p1, [x22, x27, LSL #2]\n"
+    "ldr x22, [x28, #0x60]\n"
+    "whilelt p2.s, x9, %x[n_channels]\n"
+    ".inst 0xa040c204  // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+    "st1w { z25.s }, p1, [x21, x27, LSL #2]\n"
+    "ldr x21, [x28, #0x68]\n"
+    "addvl x16, x16, #4\n"
+    "cmp x10, %x[n_channels]\n"
+    "st1w { z26.s }, p1, [x20, x27, LSL #2]\n"
+    "ldr x20, [x28, #0x70]\n"
+    ".inst 0xc1adc9dc  // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
+    "ld1w { z8.s }, p3/Z, [x16]\n"
+    "st1w { z27.s }, p1, [x19, x27, LSL #2]\n"
+    "ldr x19, [x28, #0x78]\n"
+    "addvl x16, x16, #1\n"
+    "st1w { z28.s }, p1, [x22, x27, LSL #2]\n"
+    "st1w { z29.s }, p1, [x21, x27, LSL #2]\n"
+    "st1w { z30.s }, p1, [x20, x27, LSL #2]\n"
+    "st1w { z31.s }, p1, [x19, x27, LSL #2]\n"
+    "blt 1b\n"
+    "2:"  // Channel tail
+    "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
+    "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
+    "ldr x26, [x15, #0x20]\n"
+    "incw x27\n"
+    "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
+    "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
+    "ldr x25, [x15, #0x30]\n"
+    "mov p1.b, p2.b\n"
+    "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
+    "ldr x24, [x15, #0x28]\n"
+    "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
+    "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
+    "fmla z21.s, p3/M, z5.s, z12.s\n"
+    "ldr x23, [x15, #0x38]\n"
+    "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
+    "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
+    "ld1w { z9.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "ldr x14, [x15, #0x40]\n"
+    "fmla z16.s, p3/M, z0.s, z10.s\n"
+    "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "ldr x13, [x15, #0x48]\n"
+    "fmla z22.s, p3/M, z4.s, z12.s\n"
+    "fmla z25.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "ldr x12, [x15, #0x50]\n"
+    "fmla z26.s, p3/M, z1.s, z12.s\n"
+    "fmla z17.s, p3/M, z8.s, z12.s\n"
+    "ldr x26, [x15, #0x60]\n"
+    "fmla z18.s, p3/M, z7.s, z12.s\n"
+    "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldr x24, [x15, #0x68]\n"
+    "fmla z21.s, p3/M, z7.s, z9.s\n"
+    "fmla z19.s, p3/M, z6.s, z12.s\n"
+    "ldr x11, [x15, #0x58]\n"
+    "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
+    "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "ldr x25, [x15, #0x70]\n"
+    "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
+    "fmla z22.s, p3/M, z6.s, z9.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "ldr x23, [x15, #0x78]\n"
+    "fmla z25.s, p3/M, z4.s, z9.s\n"
+    "fmla z26.s, p3/M, z3.s, z9.s\n"
+    "ldr x14, [x15, #0x80]\n"
+    "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+    "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+    "ldr x13, [x15, #0x88]\n"
+    "fmla z20.s, p3/M, z8.s, z9.s\n"
+    "fmla z24.s, p3/M, z5.s, z9.s\n"
+    "ldr x22, [x28, #0x0]\n"
+    "fmla z28.s, p3/M, z2.s, z9.s\n"
+    "fmla z16.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z9.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ldr x12, [x15, #0x90]\n"
+    "fmla z17.s, p3/M, z0.s, z12.s\n"
+    "fmla z18.s, p3/M, z2.s, z11.s\n"
+    "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "ldr x11, [x15, #0x98]\n"
+    "fmla z21.s, p3/M, z8.s, z10.s\n"
+    "fmla z19.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "ldr x26, [x15, #0xa0]\n"
+    "fmla z22.s, p3/M, z7.s, z10.s\n"
+    "fmla z23.s, p3/M, z6.s, z10.s\n"
+    "ldr x21, [x28, #0x8]\n"
+    "fmla z25.s, p3/M, z5.s, z10.s\n"
+    "fmla z26.s, p3/M, z4.s, z10.s\n"
+    "ldr x20, [x28, #0x10]\n"
+    "fmla z27.s, p3/M, z3.s, z10.s\n"
+    "fmla z29.s, p3/M, z2.s, z10.s\n"
+    "ldr x19, [x28, #0x18]\n"
+    "fmla z30.s, p3/M, z1.s, z10.s\n"
+    "fmla z31.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "ldr x24, [x15, #0xa8]\n"
+    "fmla z16.s, p3/M, z3.s, z9.s\n"
+    "fmla z20.s, p3/M, z0.s, z9.s\n"
+    "fmla z24.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "ldr x25, [x15, #0xb0]\n"
+    "fmla z17.s, p3/M, z4.s, z10.s\n"
+    "fmla z18.s, p3/M, z3.s, z10.s\n"
+    "fmla z21.s, p3/M, z1.s, z10.s\n"
+    "fmla z19.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z2.s, z12.s\n"
+    "fmla z22.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "ldr x23, [x15, #0xb8]\n"
+    "fmla z27.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "ldr x14, [x15, #0xc0]\n"
+    "fmla z16.s, p3/M, z5.s, z10.s\n"
+    "fmla z20.s, p3/M, z2.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldr x13, [x15, #0xc8]\n"
+    "fmla z17.s, p3/M, z5.s, z12.s\n"
+    "fmla z18.s, p3/M, z4.s, z12.s\n"
+    "fmla z21.s, p3/M, z2.s, z12.s\n"
+    "fmla z19.s, p3/M, z3.s, z12.s\n"
+    "fmla z22.s, p3/M, z1.s, z12.s\n"
+    "fmla z23.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "ldr x11, [x15, #0xd8]\n"
+    "fmla z28.s, p3/M, z7.s, z11.s\n"
+    "fmla z29.s, p3/M, z6.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ldr x12, [x15, #0xd0]\n"
+    "fmla z16.s, p3/M, z7.s, z10.s\n"
+    "fmla z17.s, p3/M, z6.s, z10.s\n"
+    "fmla z20.s, p3/M, z4.s, z10.s\n"
+    "fmla z21.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z1.s, z10.s\n"
+    "fmla z25.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "ldr x26, [x15, #0xe0]\n"
+    "fmla z18.s, p3/M, z8.s, z12.s\n"
+    "fmla z30.s, p3/M, z8.s, z11.s\n"
+    "fmla z31.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z1.s, z12.s\n"
+    "ldr x24, [x15, #0xe8]\n"
+    "fmla z19.s, p3/M, z7.s, z12.s\n"
+    "fmla z22.s, p3/M, z5.s, z12.s\n"
+    "fmla z23.s, p3/M, z4.s, z12.s\n"
+    "fmla z26.s, p3/M, z2.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "ldr x25, [x15, #0xf0]\n"
+    "fmla z16.s, p3/M, z2.s, z10.s\n"
+    "fmla z17.s, p3/M, z1.s, z10.s\n"
+    "fmla z18.s, p3/M, z0.s, z10.s\n"
+    "fmla z20.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "ldr x23, [x15, #0xf8]\n"
+    "fmla z21.s, p3/M, z6.s, z11.s\n"
+    "fmla z24.s, p3/M, z4.s, z11.s\n"
+    "fmla z25.s, p3/M, z3.s, z11.s\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z0.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z4.s, z11.s\n"
+    "ldr x14, [x15, #0x100]\n"
+    "fmla z30.s, p3/M, z2.s, z11.s\n"
+    "fmla z17.s, p3/M, z2.s, z12.s\n"
+    "fmla z18.s, p3/M, z1.s, z12.s\n"
+    "fmla z19.s, p3/M, z0.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "ldr x13, [x15, #0x108]\n"
+    "fmla z16.s, p3/M, z6.s, z10.s\n"
+    "fmla z20.s, p3/M, z3.s, z10.s\n"
+    "fmla z24.s, p3/M, z0.s, z10.s\n"
+    "fmla z22.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "ldr x12, [x15, #0x110]\n"
+    "fmla z23.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z5.s, z11.s\n"
+    "fmla z31.s, p3/M, z1.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z2.s, z12.s\n"
+    "ldr x11, [x15, #0x118]\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "fmla z30.s, p3/M, z3.s, z11.s\n"
+    "fmla z19.s, p3/M, z8.s, z12.s\n"
+    "fmla z23.s, p3/M, z5.s, z12.s\n"
+    "fmla z20.s, p3/M, z6.s, z10.s\n"
+    "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
+    "fmla z24.s, p3/M, z3.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
+    "fmla z25.s, p3/M, z7.s, z11.s\n"
+    "fmla z26.s, p3/M, z6.s, z11.s\n"
+    "fmla z28.s, p3/M, z5.s, z11.s\n"
+    "fmla z27.s, p3/M, z5.s, z12.s\n"
+    "fmla z31.s, p3/M, z2.s, z12.s\n"
+    "fmla z29.s, p3/M, z7.s, z10.s\n"
+    "fmla z30.s, p3/M, z6.s, z10.s\n"
+    "fmla z24.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+    "fmla z28.s, p3/M, z8.s, z10.s\n"
+    "fmla z25.s, p3/M, z8.s, z11.s\n"
+    "fmla z26.s, p3/M, z7.s, z11.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
+    "fmla z27.s, p3/M, z6.s, z11.s\n"
+    "fmla z29.s, p3/M, z5.s, z11.s\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z3.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
+    "fmla z23.s, p3/M, z8.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+    "fmla z16.s, p3/M, z4.s, z10.s\n"
+    "fmla z17.s, p3/M, z3.s, z10.s\n"
+    "fmla z18.s, p3/M, z5.s, z11.s\n"
+    "fmla z19.s, p3/M, z4.s, z11.s\n"
+    "fmla z29.s, p3/M, z8.s, z12.s\n"
+    "fmla z30.s, p3/M, z7.s, z12.s\n"
+    "fmla z31.s, p3/M, z6.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
+    "fmla z20.s, p3/M, z1.s, z10.s\n"
+    "fmla z21.s, p3/M, z0.s, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x11, x9, LSL #2]\n"
+    "fmla z22.s, p3/M, z2.s, z11.s\n"
+    "fmla z23.s, p3/M, z1.s, z11.s\n"
+    ".inst 0xc1adc9d0  // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
+    "st1w { z16.s }, p1, [x22, x27, LSL #2]\n"
+    "ldr x22, [x28, #0x20]\n"
+    "fmla z24.s, p3/M, z7.s, z12.s\n"
+    "st1w { z17.s }, p1, [x21, x27, LSL #2]\n"
+    "ldr x21, [x28, #0x28]\n"
+    "fmla z25.s, p3/M, z6.s, z12.s\n"
+    "fmla z26.s, p3/M, z8.s, z10.s\n"
+    "st1w { z18.s }, p1, [x20, x27, LSL #2]\n"
+    "ldr x20, [x28, #0x30]\n"
+    "fmla z27.s, p3/M, z7.s, z10.s\n"
+    ".inst 0xc1adc9d4  // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
+    "st1w { z19.s }, p1, [x19, x27, LSL #2]\n"
+    "ldr x19, [x28, #0x38]\n"
+    "fmla z28.s, p3/M, z4.s, z12.s\n"
+    "fmla z29.s, p3/M, z3.s, z12.s\n"
+    "st1w { z20.s }, p1, [x22, x27, LSL #2]\n"
+    "ldr x22, [x28, #0x40]\n"
+    "fmla z30.s, p3/M, z5.s, z10.s\n"
+    "fmla z31.s, p3/M, z4.s, z10.s\n"
+    "st1w { z21.s }, p1, [x21, x27, LSL #2]\n"
+    "ldr x21, [x28, #0x48]\n"
+    ".inst 0xc1adc9d8  // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
+    ".inst 0xc1adc9dc  // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
+    "st1w { z22.s }, p1, [x20, x27, LSL #2]\n"
+    "ldr x20, [x28, #0x50]\n"
+    "st1w { z23.s }, p1, [x19, x27, LSL #2]\n"
+    "ldr x19, [x28, #0x58]\n"
+    "st1w { z24.s }, p1, [x22, x27, LSL #2]\n"
+    "ldr x22, [x28, #0x60]\n"
+    "st1w { z25.s }, p1, [x21, x27, LSL #2]\n"
+    "ldr x21, [x28, #0x68]\n"
+    "st1w { z26.s }, p1, [x20, x27, LSL #2]\n"
+    "ldr x20, [x28, #0x70]\n"
+    "st1w { z27.s }, p1, [x19, x27, LSL #2]\n"
+    "ldr x19, [x28, #0x78]\n"
+    "st1w { z28.s }, p1, [x22, x27, LSL #2]\n"
+    "st1w { z29.s }, p1, [x21, x27, LSL #2]\n"
+    "st1w { z30.s }, p1, [x20, x27, LSL #2]\n"
+    "st1w { z31.s }, p1, [x19, x27, LSL #2]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
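
The indirect entry point above differs from the direct one only in how addresses are formed: the caller hands over the 36 input pointers (one per element of the 6x6 receptive field of the 4x4 output tile) and the 16 output pointers, and the Args constructor simply permutes the input pointers into the order in which the assembly consumes them. Per channel, each of the sixteen accumulators z16-z31 is a plain 3x3 multiply-accumulate seeded from the bias in z15 and clamped by the activation bounds. A scalar sketch of one output point, assuming the packed params blob holds a per-channel bias followed by the nine per-channel weights (which matches the z15 then z0-z8 load order above):

    #include <algorithm>

    // Illustrative reference only. "patch" is the 6x6 grid of per-channel input
    // pointers used by the 4x4 output tile, indexed row-major.
    static float depthwise_3x3_s1_point(const float *const patch[36], float bias,
                                        const float weights[9],
                                        unsigned int out_i, unsigned int out_j,
                                        unsigned int channel,
                                        float act_min, float act_max)
    {
        float acc = bias;  // the movprfx from z15 in the assembly
        for (unsigned int ki = 0; ki < 3; ki++)
        {
            for (unsigned int kj = 0; kj < 3; kj++)
            {
                acc += weights[ki * 3 + kj] * patch[(out_i + ki) * 6 + (out_j + kj)][channel];
            }
        }
        return std::min(std::max(acc, act_min), act_max);  // the fclamp against z14/z13
    }
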
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
new file mode 100644
index 0000000..a4ca907
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(const float *const *const input_ptrs, float *const *const outptrs, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(const unsigned int n_tile_rows, const unsigned int n_tile_cols, const float *inptr, int64_t ld_input_row, int64_t ld_input_col, float *outptr, int64_t ld_output_row, int64_t ld_output_col, const void *params, unsigned int n_channels, const float activation_min, const float activation_max);
+
+class sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float>
+{
+  private:
+  using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>;
+  Parent::IndirectKernelType m_indirect_kernel = sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl;
+  Parent::DirectKernelType m_direct_kernel = sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl;
+
+  public:
+  using return_type = float;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  constexpr static unsigned int kernel_rows = 3;
+  constexpr static unsigned int kernel_cols = 3;
+
+  constexpr static unsigned int stride_rows = 2;
+  constexpr static unsigned int stride_cols = 2;
+
+  constexpr static unsigned int output_rows = 2;
+  constexpr static unsigned int output_cols = 2;
+
+  sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *)
+  : Parent(output_rows, output_cols, kernel_rows, kernel_cols, stride_rows, stride_cols) {}
+
+  arm_gemm::VLType get_vl_type(void) const override { return vl_type; }
+
+  Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; }
+  Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
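
This header only records the kernel geometry and wires the two generated entry points into the depthfirst strategy machinery; the input footprint of a tile follows directly from its compile-time constants. As a small illustration (not part of the class above):

    // Input patch extent covered by one output tile of a depthwise kernel.
    constexpr unsigned int patch_extent(unsigned int output, unsigned int stride, unsigned int kernel)
    {
        return (output - 1) * stride + kernel;
    }
    // For this strategy: (2 - 1) * 2 + 3 == 5, i.e. every 2x2 output tile reads a
    // 5x5 input patch, hence 25 input pointers for the indirect entry point.
    static_assert(patch_extent(2, 2, 3) == 5, "5x5 input patch per 2x2 output tile");
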
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
new file mode 100644
index 0000000..449df1e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
+  const unsigned int n_tile_rows,
+  const unsigned int n_tile_cols,
+  const float *inptr,
+  int64_t ld_input_row,
+  int64_t ld_input_col,
+  float *outptr,
+  int64_t ld_output_row,
+  int64_t ld_output_col,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    const uint64_t n_tile_rows, n_tile_cols;
+    const float *inptr;
+    const uint64_t ld_input_row;
+    const uint64_t ld_input_col;
+    float *outptr;
+    const uint64_t ld_output_row;
+    const uint64_t ld_output_col;
+    const void *params;
+    const float min, max;
+
+    uint64_t tile_i = 0, tile_j = 0;
+
+    Args(
+      const unsigned int n_tile_rows,
+      const unsigned int n_tile_cols,
+      const float *inptr,
+      int64_t ld_input_row,
+      int64_t ld_input_col,
+      float *outptr,
+      int64_t ld_output_row,
+      int64_t ld_output_col,
+      const void *params,
+      const float activation_min,
+      const float activation_max
+    ) : n_tile_rows(n_tile_rows), n_tile_cols(n_tile_cols), inptr(inptr),
+        ld_input_row(ld_input_row), ld_input_col(ld_input_col), outptr(outptr),
+        ld_output_row(ld_output_row), ld_output_col(ld_output_col),
+        params(params), min(activation_min), max(activation_max)
+    {
+    }
+  };
+
+  Args params_struct(
+    n_tile_rows, n_tile_cols,
+    inptr, ld_input_row, ld_input_col,
+    outptr, ld_output_row, ld_output_col,
+    params, activation_min, activation_max
+  );
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ptrue p3.b\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "mov x2, #0x0\n"
+    "mov x3, #0x0\n"
+    "1:"  // Tile loop
+    "str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "mov x21, #0x4\n"
+    "str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+    "mul x19, x2, x20\n"  // offset = tile_i * ld_input_row
+    "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+    "madd x19, x3, x4, x19\n"  // offset += tile_j * ld_input_col
+    "mul x19, x19, x21\n"  // offset *= kernel_stride * output_size
+    "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+    "add x5, x5, x19, LSL #2\n"  // inptr[0] += offset * sizeof(float)
+    "add x6, x5, x20, LSL #2\n"
+    "add x7, x6, x20, LSL #2\n"
+    "add x8, x4, x4\n"
+    "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+    "add x16, x7, x20, LSL #2\n"
+    "add x15, x8, x4\n"
+    "add x14, x16, x20, LSL #2\n"
+    "add x13, x15, x4\n"
+    "cbnz x3, 2f\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "sub x20, x19, x3\n"
+    "sub x20, x20, #0x1\n"
+    "lsl x12, %x[n_channels], #0x2\n"
+    "mov x19, #0x10\n"
+    "and x20, x20, #0x3fffff\n"
+    "mul x19, x19, x4\n"
+    "orr x12, x12, x20, LSL #22\n"
+    "orr x12, x12, x19, LSL #38\n"
+    "add x25, x7, x8, LSL #2\n"
+    "add x24, x5, x4, LSL #2\n"
+    "add x23, x5, x15, LSL #2\n"
+    "add x22, x5, x13, LSL #2\n"
+    "add x21, x6, x4, LSL #2\n"
+    "add x20, x5, x8, LSL #2\n"
+    "add x19, x6, x15, LSL #2\n"
+    "add x11, x6, x13, LSL #2\n"
+    "add x10, x6, x8, LSL #2\n"
+    "add x9, x16, x4, LSL #2\n"
+    "add x28, x7, x4, LSL #2\n"
+    "add x27, x16, x15, LSL #2\n"
+    "add x26, x7, x15, LSL #2\n"
+    ".inst 0xf8ac4b3a  // rprfm pldonce, x25, [x12]\n"
+    "add x25, x16, x13, LSL #2\n"
+    ".inst 0xf8ac48ba  // rprfm pldonce, x5, [x12]\n"
+    ".inst 0xf8ac4b1a  // rprfm pldonce, x24, [x12]\n"
+    "add x24, x7, x13, LSL #2\n"
+    ".inst 0xf8ac4afa  // rprfm pldonce, x23, [x12]\n"
+    "add x23, x14, x4, LSL #2\n"
+    ".inst 0xf8ac4ada  // rprfm pldonce, x22, [x12]\n"
+    "add x22, x16, x8, LSL #2\n"
+    ".inst 0xf8ac48da  // rprfm pldonce, x6, [x12]\n"
+    ".inst 0xf8ac4aba  // rprfm pldonce, x21, [x12]\n"
+    "add x21, x14, x15, LSL #2\n"
+    ".inst 0xf8ac4a9a  // rprfm pldonce, x20, [x12]\n"
+    "add x20, x14, x8, LSL #2\n"
+    ".inst 0xf8ac4a7a  // rprfm pldonce, x19, [x12]\n"
+    "add x19, x14, x13, LSL #2\n"
+    ".inst 0xf8ac497a  // rprfm pldonce, x11, [x12]\n"
+    ".inst 0xf8ac495a  // rprfm pldonce, x10, [x12]\n"
+    ".inst 0xf8ac4a1a  // rprfm pldonce, x16, [x12]\n"
+    ".inst 0xf8ac48fa  // rprfm pldonce, x7, [x12]\n"
+    ".inst 0xf8ac493a  // rprfm pldonce, x9, [x12]\n"
+    ".inst 0xf8ac4b9a  // rprfm pldonce, x28, [x12]\n"
+    ".inst 0xf8ac4b7a  // rprfm pldonce, x27, [x12]\n"
+    ".inst 0xf8ac4b5a  // rprfm pldonce, x26, [x12]\n"
+    ".inst 0xf8ac4b3a  // rprfm pldonce, x25, [x12]\n"
+    ".inst 0xf8ac49da  // rprfm pldonce, x14, [x12]\n"
+    ".inst 0xf8ac4b1a  // rprfm pldonce, x24, [x12]\n"
+    ".inst 0xf8ac4afa  // rprfm pldonce, x23, [x12]\n"
+    ".inst 0xf8ac4ada  // rprfm pldonce, x22, [x12]\n"
+    ".inst 0xf8ac4aba  // rprfm pldonce, x21, [x12]\n"
+    ".inst 0xf8ac4a9a  // rprfm pldonce, x20, [x12]\n"
+    ".inst 0xf8ac4a7a  // rprfm pldonce, x19, [x12]\n"
+    "2:"  // Tile loop: Prefetch input rows: End
+    "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+    "mul x20, x2, x21\n"  // offset = tile_i * ld_output_row
+    "mov x19, #0x2\n"
+    "ld1w { z19.s }, p3/Z, [x17]\n"
+    "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+    "madd x20, x3, x24, x20\n"  // offset += tile_j * ld_output_col
+    "addvl x17, x17, #1\n"
+    ".inst 0xa040c220  // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+    "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+    "mul x20, x20, x19\n"  // offset *= output_tile_size
+    "cntw x22\n"
+    "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "addvl x17, x17, #4\n"
+    "add x23, x23, x20, LSL #2\n"  // outptrs[0] += offset * sizeof(float)
+    ".inst 0xa040c224  // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    "addvl x17, x17, #4\n"
+    "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "cmp x22, %x[n_channels]\n"
+    "add x21, x23, x21, LSL #2\n"
+    "ld1w { z8.s }, p3/Z, [x17]\n"
+    "mov x20, #0x0\n"
+    "sub x19, XZR, x22\n"
+    "ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
+    "ld1w { z10.s }, p2/Z, [x5]\n"
+    "addvl x17, x17, #1\n"
+    "ld1w { z11.s }, p2/Z, [x5, x4, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x5, x15, LSL #2]\n"
+    "ld1w { z13.s }, p2/Z, [x5, x13, LSL #2]\n"
+    "ld1w { z14.s }, p2/Z, [x6]\n"
+    "ld1w { z15.s }, p2/Z, [x6, x4, LSL #2]\n"
+    "ld1w { z16.s }, p2/Z, [x5, x8, LSL #2]\n"
+    "bge 4f\n"
+    "3:"  // Tile loop: Channel loop
+    "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
+    "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
+    "whilelt p1.s, x22, %x[n_channels]\n"
+    "incw x20\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x13, LSL #2]\n"
+    "incw x22\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z2.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x6, x15, LSL #2]\n"
+    "mov p0.b, p2.b\n"
+    "fmla z28.s, p3/M, z3.s, z14.s\n"
+    "fmla z29.s, p3/M, z0.s, z16.s\n"
+    "ld1w { z13.s }, p2/Z, [x6, x8, LSL #2]\n"
+    "addvl x5, x5, #1\n"
+    "fmla z28.s, p3/M, z4.s, z15.s\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "ld1w { z14.s }, p2/Z, [x16]\n"
+    "addvl x6, x6, #1\n"
+    "fmla z28.s, p3/M, z2.s, z16.s\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "ld1w { z15.s }, p2/Z, [x7]\n"
+    "incw x19\n"
+    "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
+    "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
+    "fmla z28.s, p3/M, z5.s, z13.s\n"
+    "fmla z29.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "fmla z30.s, p3/M, z3.s, z14.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x4, LSL #2]\n"
+    "fmla z30.s, p3/M, z0.s, z15.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x16, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z14.s\n"
+    "ld1w { z16.s }, p2/Z, [x7, x4, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z15.s\n"
+    "ld1w { z11.s }, p2/Z, [x7, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z1.s, z16.s\n"
+    "addvl x7, x7, #1\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z7.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x14]\n"
+    "ld1w { z16.s }, p2/Z, [x16, x8, LSL #2]\n"
+    "fmla z30.s, p3/M, z6.s, z15.s\n"
+    "fmla z31.s, p3/M, z3.s, z16.s\n"
+    "addvl x16, x16, #1\n"
+    "ld1w { z13.s }, p2/Z, [x14, x4, LSL #2]\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z7.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x14, x15, LSL #2]\n"
+    "fmla z31.s, p3/M, z7.s, z14.s\n"
+    "fmla z30.s, p3/M, z5.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x14, x8, LSL #2]\n"
+    "fmla z31.s, p3/M, z6.s, z15.s\n"
+    "fmla z29.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z8.s, z15.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "whilelt p2.s, x20, %x[n_channels]\n"
+    "ld1w { z19.s }, p3/Z, [x17]\n"
+    "addvl x17, x17, #1\n"
+    "cmp x22, %x[n_channels]\n"
+    ".inst 0xc1b1ca5c  // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
+    ".inst 0xa040c220  // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+    "addvl x17, x17, #4\n"
+    "addvl x14, x14, #1\n"
+    "st1w { z28.s }, p0, [x23]\n"
+    ".inst 0xa040c224  // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+    "addvl x17, x17, #4\n"
+    "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+    "addvl x23, x23, #1\n"
+    "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
+    "st1w { z30.s }, p0, [x21]\n"
+    "ld1w { z10.s }, p1/Z, [x5]\n"
+    "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
+    "addvl x21, x21, #1\n"
+    "ld1w { z11.s }, p1/Z, [x5, x4, LSL #2]\n"
+    "ld1w { z12.s }, p1/Z, [x5, x15, LSL #2]\n"
+    "ld1w { z13.s }, p1/Z, [x5, x13, LSL #2]\n"
+    "ld1w { z14.s }, p1/Z, [x6]\n"
+    "ld1w { z15.s }, p1/Z, [x6, x4, LSL #2]\n"
+    "ld1w { z16.s }, p1/Z, [x5, x8, LSL #2]\n"
+    "ld1w { z8.s }, p3/Z, [x17]\n"
+    "addvl x17, x17, #1\n"
+    "blt 3b\n"
+    "4:"  // Tile loop: Channel tail
+    "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
+    "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
+    "ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+    "add x3, x3, #0x1\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z12.s }, p2/Z, [x6, x13, LSL #2]\n"
+    "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z2.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x6, x15, LSL #2]\n"
+    "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+    "fmla z28.s, p3/M, z3.s, z14.s\n"
+    "fmla z29.s, p3/M, z0.s, z16.s\n"
+    "ld1w { z13.s }, p2/Z, [x6, x8, LSL #2]\n"
+    "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+    "fmla z28.s, p3/M, z4.s, z15.s\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "ld1w { z14.s }, p2/Z, [x16]\n"
+    "cmp x3, x19\n"
+    "fmla z28.s, p3/M, z2.s, z16.s\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "ld1w { z15.s }, p2/Z, [x7]\n"
+    "add x19, x2, #0x1\n"
+    "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
+    "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
+    "csel x2, x2, x19, LT\n"
+    "fmla z28.s, p3/M, z5.s, z13.s\n"
+    "fmla z29.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x16, x15, LSL #2]\n"
+    "mov p0.b, p2.b\n"
+    "fmla z30.s, p3/M, z3.s, z14.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x16, x4, LSL #2]\n"
+    "csel x3, x3, XZR, LT\n"
+    "fmla z30.s, p3/M, z0.s, z15.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x16, x13, LSL #2]\n"
+    "cmp x2, x20\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z14.s\n"
+    "ld1w { z16.s }, p2/Z, [x7, x4, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z15.s\n"
+    "ld1w { z11.s }, p2/Z, [x7, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z1.s, z16.s\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z7.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x14]\n"
+    "ld1w { z16.s }, p2/Z, [x16, x8, LSL #2]\n"
+    "fmla z30.s, p3/M, z6.s, z15.s\n"
+    "fmla z31.s, p3/M, z3.s, z16.s\n"
+    "ld1w { z13.s }, p2/Z, [x14, x4, LSL #2]\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z7.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x14, x15, LSL #2]\n"
+    "fmla z31.s, p3/M, z7.s, z14.s\n"
+    "fmla z30.s, p3/M, z5.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x14, x8, LSL #2]\n"
+    "fmla z31.s, p3/M, z6.s, z15.s\n"
+    "fmla z29.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x14, x13, LSL #2]\n"
+    "fmla z30.s, p3/M, z8.s, z15.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    ".inst 0xc1b1ca5c  // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
+    "st1w { z28.s }, p0, [x23]\n"
+    "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+    "st1w { z30.s }, p0, [x21]\n"
+    "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
+    "blt 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
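
Note: the generated kernels in this patch all hand their parameters to the inline assembly through a local Args struct, passing the struct address in a register and each field's offset as an immediate operand ("I" constraints on offsetof(...)). The following is a minimal stand-alone sketch of that idiom only; DemoArgs and load_min are made-up names, the snippet is not part of the patch, and it assumes an AArch64 GCC/Clang toolchain.

// Sketch only: the offsetof-based parameter-block idiom, reduced to one field.
#if defined(__aarch64__)

#include <cstddef>

struct DemoArgs
{
  const float *inptr;   // 8 bytes, so "min" sits at offset 8
  float min, max;
};

static float load_min(const DemoArgs &args)
{
  float result;
  __asm__ __volatile__(
    // Load args.min via the base register plus the field's constant offset,
    // the same way the kernels above read offsetof_args_min and friends.
    "ldr %s[result], [%x[args], %[offsetof_min]]\n"
    : [result] "=w" (result)
    : [args] "r" (&args), [offsetof_min] "I" (offsetof(DemoArgs, min))
    : "memory"
  );
  return result;
}

#endif  // defined(__aarch64__)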
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
new file mode 100644
index 0000000..063084e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
+  const float *const *const input_ptrs,
+  float *const *const outptrs,
+  const void *params,
+  unsigned int n_channels,
+  const float activation_min,
+  const float activation_max
+)
+{
+  struct Args
+  {
+    float *const *outptrs;
+    const void *params;
+    const float min, max;
+    const float *inptrs[25];
+
+    Args(
+      const float *const *const input_ptrs,
+      float *const *const outptrs,
+      const void *const params,
+      const float min,
+      const float max
+    ) : outptrs(outptrs), params(params), min(min), max(max)
+    {
+      inptrs[0] = input_ptrs[12];
+      inptrs[1] = input_ptrs[0];
+      inptrs[2] = input_ptrs[1];
+      inptrs[3] = input_ptrs[3];
+      inptrs[4] = input_ptrs[4];
+      inptrs[5] = input_ptrs[5];
+      inptrs[6] = input_ptrs[6];
+      inptrs[7] = input_ptrs[2];
+      inptrs[8] = input_ptrs[8];
+      inptrs[9] = input_ptrs[9];
+      inptrs[10] = input_ptrs[7];
+      inptrs[11] = input_ptrs[15];
+      inptrs[12] = input_ptrs[10];
+      inptrs[13] = input_ptrs[16];
+      inptrs[14] = input_ptrs[11];
+      inptrs[15] = input_ptrs[18];
+      inptrs[16] = input_ptrs[13];
+      inptrs[17] = input_ptrs[19];
+      inptrs[18] = input_ptrs[20];
+      inptrs[19] = input_ptrs[14];
+      inptrs[20] = input_ptrs[21];
+      inptrs[21] = input_ptrs[17];
+      inptrs[22] = input_ptrs[23];
+      inptrs[23] = input_ptrs[22];
+      inptrs[24] = input_ptrs[24];
+
+    }
+  };
+
+  Args params_struct(input_ptrs, outptrs, params,
+                     activation_min, activation_max);
+
+  __asm__ __volatile__(
+    "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
+    "ptrue p3.b\n"
+    "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+    ".inst 0x25207810  // ptrue pn8.b\n"
+    "ld1w { z19.s }, p3/Z, [x14]\n"
+    "addvl x14, x14, #1\n"
+    "ldp x13, x12, [x19, #0x0]\n"
+    "cntw x11\n"
+    ".inst 0xa040c1c0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+    "addvl x14, x14, #4\n"
+    "ldp x10, x9, [x19, #0x10]\n"
+    "mov x28, #0x0\n"
+    "whilelt p2.s, XZR, %x[n_channels]\n"
+    ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+    "ldp x27, x26, [x15, #0x0]\n"
+    "addvl x14, x14, #4\n"
+    "cmp x11, %x[n_channels]\n"
+    "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+    "ldp x25, x24, [x15, #0x10]\n"
+    "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+    "sub x23, XZR, x11\n"
+    "ldp x22, x21, [x15, #0x20]\n"
+    "ld1w { z8.s }, p3/Z, [x14]\n"
+    "addvl x14, x14, #1\n"
+    "ldp x20, x19, [x15, #0x30]\n"
+    "ld1w { z9.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "ld1w { z10.s }, p2/Z, [x26, x28, LSL #2]\n"
+    "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ld1w { z12.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "ld1w { z13.s }, p2/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z14.s }, p2/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z15.s }, p2/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z16.s }, p2/Z, [x19, x28, LSL #2]\n"
+    "bge 2f\n"
+    "1:"  // Channel loop
+    "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
+    "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
+    "ldr x27, [x15, #0x40]\n"
+    "whilelt p1.s, x11, %x[n_channels]\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z1.s, z12.s\n"
+    "ldr x26, [x15, #0x48]\n"
+    "ld1w { z12.s }, p2/Z, [x26, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z2.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "ldr x25, [x15, #0x50]\n"
+    "fmla z28.s, p3/M, z3.s, z14.s\n"
+    "fmla z29.s, p3/M, z0.s, z16.s\n"
+    "ld1w { z13.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ldr x24, [x15, #0x58]\n"
+    "fmla z28.s, p3/M, z4.s, z15.s\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "ldr x19, [x15, #0x78]\n"
+    "ld1w { z14.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z2.s, z16.s\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "ldr x22, [x15, #0x60]\n"
+    "ld1w { z15.s }, p2/Z, [x22, x28, LSL #2]\n"
+    "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
+    "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ldr x27, [x15, #0x80]\n"
+    "ld1w { z12.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z5.s, z13.s\n"
+    "fmla z29.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x19, x28, LSL #2]\n"
+    "ldr x21, [x15, #0x68]\n"
+    "fmla z30.s, p3/M, z3.s, z14.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "ldr x26, [x15, #0x88]\n"
+    "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
+    "fmla z30.s, p3/M, z0.s, z15.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x26, x28, LSL #2]\n"
+    "ldr x20, [x15, #0x70]\n"
+    "ldr x24, [x15, #0x98]\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z14.s\n"
+    "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z15.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "ldr x25, [x15, #0x90]\n"
+    "fmla z30.s, p3/M, z1.s, z16.s\n"
+    "ldr x21, [x15, #0xa8]\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z7.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ld1w { z16.s }, p2/Z, [x21, x28, LSL #2]\n"
+    "ldr x22, [x15, #0xa0]\n"
+    "fmla z30.s, p3/M, z6.s, z15.s\n"
+    "fmla z31.s, p3/M, z3.s, z16.s\n"
+    "ldr x20, [x15, #0xb0]\n"
+    "ld1w { z13.s }, p2/Z, [x22, x28, LSL #2]\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z7.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x20, x28, LSL #2]\n"
+    "ldr x19, [x15, #0xb8]\n"
+    "fmla z31.s, p3/M, z7.s, z14.s\n"
+    "fmla z30.s, p3/M, z5.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x19, x28, LSL #2]\n"
+    "ldr x27, [x15, #0xc0]\n"
+    "fmla z31.s, p3/M, z6.s, z15.s\n"
+    "fmla z29.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "ldp x27, x26, [x15, #0x0]\n"
+    "fmla z30.s, p3/M, z8.s, z15.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "ldp x25, x24, [x15, #0x10]\n"
+    "ld1w { z19.s }, p3/Z, [x14]\n"
+    "addvl x14, x14, #1\n"
+    "incw x28\n"
+    "ldp x22, x21, [x15, #0x20]\n"
+    "ld1w { z9.s }, p1/Z, [x27, x11, LSL #2]\n"
+    "incw x23\n"
+    "mov p0.b, p2.b\n"
+    "ldp x20, x19, [x15, #0x30]\n"
+    "ld1w { z10.s }, p1/Z, [x26, x11, LSL #2]\n"
+    "whilelt p2.s, x28, %x[n_channels]\n"
+    ".inst 0xc1b1ca5c  // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
+    "ld1w { z11.s }, p1/Z, [x25, x11, LSL #2]\n"
+    "st1w { z28.s }, p0, [x13, x23, LSL #2]\n"
+    "ld1w { z12.s }, p1/Z, [x24, x11, LSL #2]\n"
+    "st1w { z29.s }, p0, [x12, x23, LSL #2]\n"
+    "ld1w { z13.s }, p1/Z, [x22, x11, LSL #2]\n"
+    "st1w { z30.s }, p0, [x10, x23, LSL #2]\n"
+    "ld1w { z14.s }, p1/Z, [x21, x11, LSL #2]\n"
+    "st1w { z31.s }, p0, [x9, x23, LSL #2]\n"
+    "ld1w { z15.s }, p1/Z, [x20, x11, LSL #2]\n"
+    "ld1w { z16.s }, p1/Z, [x19, x11, LSL #2]\n"
+    "incw x11\n"
+    "cmp x11, %x[n_channels]\n"
+    ".inst 0xa040c1c0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+    "addvl x14, x14, #4\n"
+    ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+    "addvl x14, x14, #4\n"
+    "ld1w { z8.s }, p3/Z, [x14]\n"
+    "addvl x14, x14, #1\n"
+    "blt 1b\n"
+    "2:"  // Channel tail
+    "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
+    "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
+    "ldr x27, [x15, #0x40]\n"
+    "incw x23\n"
+    "fmla z28.s, p3/M, z0.s, z10.s\n"
+    "fmla z29.s, p3/M, z1.s, z12.s\n"
+    "ldr x26, [x15, #0x48]\n"
+    "ld1w { z12.s }, p2/Z, [x26, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z1.s, z11.s\n"
+    "fmla z29.s, p3/M, z2.s, z13.s\n"
+    "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "ldr x25, [x15, #0x50]\n"
+    "fmla z28.s, p3/M, z3.s, z14.s\n"
+    "fmla z29.s, p3/M, z0.s, z16.s\n"
+    "ld1w { z13.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ldr x24, [x15, #0x58]\n"
+    "fmla z28.s, p3/M, z4.s, z15.s\n"
+    "fmla z29.s, p3/M, z4.s, z11.s\n"
+    "ldr x19, [x15, #0x78]\n"
+    "ld1w { z14.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z2.s, z16.s\n"
+    "fmla z29.s, p3/M, z5.s, z12.s\n"
+    "ldr x22, [x15, #0x60]\n"
+    "ld1w { z15.s }, p2/Z, [x22, x28, LSL #2]\n"
+    "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
+    "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
+    "ldr x27, [x15, #0x80]\n"
+    "ld1w { z12.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z5.s, z13.s\n"
+    "fmla z29.s, p3/M, z3.s, z13.s\n"
+    "ld1w { z13.s }, p2/Z, [x19, x28, LSL #2]\n"
+    "ldr x21, [x15, #0x68]\n"
+    "fmla z30.s, p3/M, z3.s, z14.s\n"
+    "fmla z31.s, p3/M, z4.s, z13.s\n"
+    "ldr x26, [x15, #0x88]\n"
+    "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
+    "fmla z30.s, p3/M, z0.s, z15.s\n"
+    "fmla z31.s, p3/M, z1.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x26, x28, LSL #2]\n"
+    "ldr x20, [x15, #0x70]\n"
+    "ldr x24, [x15, #0x98]\n"
+    "fmla z30.s, p3/M, z4.s, z11.s\n"
+    "fmla z31.s, p3/M, z5.s, z14.s\n"
+    "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
+    "fmla z28.s, p3/M, z6.s, z15.s\n"
+    "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
+    "ldr x25, [x15, #0x90]\n"
+    "fmla z30.s, p3/M, z1.s, z16.s\n"
+    "ldr x21, [x15, #0xa8]\n"
+    "fmla z31.s, p3/M, z2.s, z11.s\n"
+    "fmla z28.s, p3/M, z7.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x25, x28, LSL #2]\n"
+    "ld1w { z16.s }, p2/Z, [x21, x28, LSL #2]\n"
+    "ldr x22, [x15, #0xa0]\n"
+    "fmla z30.s, p3/M, z6.s, z15.s\n"
+    "fmla z31.s, p3/M, z3.s, z16.s\n"
+    "ldr x20, [x15, #0xb0]\n"
+    "ld1w { z13.s }, p2/Z, [x22, x28, LSL #2]\n"
+    "fmla z30.s, p3/M, z7.s, z13.s\n"
+    "fmla z29.s, p3/M, z7.s, z12.s\n"
+    "ld1w { z14.s }, p2/Z, [x20, x28, LSL #2]\n"
+    "ldr x19, [x15, #0xb8]\n"
+    "fmla z31.s, p3/M, z7.s, z14.s\n"
+    "fmla z30.s, p3/M, z5.s, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x19, x28, LSL #2]\n"
+    "ldr x27, [x15, #0xc0]\n"
+    "fmla z31.s, p3/M, z6.s, z15.s\n"
+    "fmla z29.s, p3/M, z8.s, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+    "fmla z30.s, p3/M, z8.s, z15.s\n"
+    "fmla z31.s, p3/M, z8.s, z11.s\n"
+    "mov p0.b, p2.b\n"
+    ".inst 0xc1b1ca5c  // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
+    "st1w { z28.s }, p0, [x13, x23, LSL #2]\n"
+    "st1w { z29.s }, p0, [x12, x23, LSL #2]\n"
+    "st1w { z30.s }, p0, [x10, x23, LSL #2]\n"
+    "st1w { z31.s }, p0, [x9, x23, LSL #2]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
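
Note: the indirect variant above carries 25 input pointers in its Args because a 2x2 output tile of a 3x3 stride-2 kernel reads a 5x5 window of input points: (output_size - 1) * stride + kernel_size = (2 - 1) * 2 + 3 = 5 along each dimension. A compile-time sanity check of that count, as a sketch with hypothetical names rather than patch code:

// Sketch only: why the indirect kernel's Args holds 25 input pointers.
constexpr unsigned int window_extent(unsigned int output_size, unsigned int stride, unsigned int kernel_size)
{
  return (output_size - 1) * stride + kernel_size;  // input points read along one dimension
}

static_assert(window_extent(2, 2, 3) * window_extent(2, 2, 3) == 25,
              "a 2x2 output of a 3x3 stride-2 depthwise kernel reads a 5x5 input window");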
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za.hpp
new file mode 100644
index 0000000..d29d0b5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32_planar_3x3_s1_4rows_mla_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32_planar_3x3_s1_4rows_mla_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32_planar_3x3_s1_4rows_mla_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
new file mode 100644
index 0000000..493166c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x6\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x7\n"
+    "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ld1rw { z5.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x16\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z11.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x17\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z16.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z16.s }, p1/Z, [x19, x15, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x14, #0x1\n"
+    "orr x23, x19, %x[ld_in_col], LSL #18\n"
+    "mov z17.d, z16.d\n"
+    "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xa1404ac0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x22]\n"
+    "orr x23, x16, x23, LSL #20\n"
+    "mov x21, #0x6\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ld1w { z3.s }, p2/Z, [x22, #2, MUL VL]\n"
+    "addvl x22, x22, #3\n"
+    "add x20, x17, x7\n"
+    ".inst 0xa0404ac6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x22]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "mov z18.d, z16.d\n"
+    "mov z19.d, z16.d\n"
+    "ld1w { z9.s }, p2/Z, [x22, #2, MUL VL]\n"
+    "addvl x22, x22, #3\n"
+    "mov x8, #0x0\n"
+    "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+    ".inst 0xa1404ac2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x22]\n"
+    "lsl x23, x23, #0x2\n"
+    "sub x21, x21, x20\n"
+    "ld1w { z1.s }, p2/Z, [x22, #2, MUL VL]\n"
+    "madd x19, x19, x17, x13\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b74a7c  // rprfm pldstrm, x23, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x13, x17, x19, x13\n"
+    ".inst 0xc0040e00  // mova za.d[x8, #0], { z16.d-z19.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040e01  // mova za.d[x8, #1], { z16.d-z19.d }\n"
+    "mov x9, #0x2\n"
+    "ldp x28, x27, [x10], #0x10\n"
+    ".inst 0xc0040e02  // mova za.d[x8, #2], { z16.d-z19.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    "ldp x24, x23, [x10], #0x10\n"
+    "ldp x22, x21, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x9\n"
+    "csel x19, x20, x9, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x9, x9, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060c0c  // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+    "sub x11, x11, x20\n"
+    ".inst 0xc1abc8ac  // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z12.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z13.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "st1w { z14.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "st1w { z15.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x17, x7\n"
+    "bne 10f\n"
+    "cbz x9, 8f\n"
+    "cmp x9, #0x1\n"
+    "sub x14, x14, x9\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 2 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z26.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1301ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
+    "ld1w { z27.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1361b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z6.s\n"
+    "ld1w { z28.s }, p1/Z, [x19]\n"
+    ".inst 0xc1321b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z2.s\n"
+    "7:"  // Unpadded: 1 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z26.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1381ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z8.s\n"
+    ".inst 0xc1301ae1  // fmla za.s[x8, 1], { z23.s-z26.s }, z0.s\n"
+    "ld1w { z27.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1371b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z7.s\n"
+    "ld1w { z28.s }, p1/Z, [x19]\n"
+    ".inst 0xc1361b01  // fmla za.s[x8, 1], { z24.s-z27.s }, z6.s\n"
+    ".inst 0xc13a1b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
+    ".inst 0xc1321b21  // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
+    "8:"  // Unpadded: 0 priming loads
+    "cbz x14, 16f\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x13]\n"
+    "sub x14, x14, #0x1\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, #0x1\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "cmp x14, x11\n"
+    "ld1w { z26.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "csel x20, x14, x11, LT\n"
+    "ld1w { z27.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z28.s }, p1/Z, [x19]\n"
+    "sub x11, x11, x20\n"
+    "cbz x20, 15f\n"
+    "9:"  // Unpadded: Main loop
+    ".inst 0xc1331ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc1391b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
+    ".inst 0xc1381ae1  // fmla za.s[x8, 1], { z23.s-z26.s }, z8.s\n"
+    ".inst 0xc1301ae2  // fmla za.s[x8, 2], { z23.s-z26.s }, z0.s\n"
+    "ld1w { z23.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1311b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z1.s\n"
+    ".inst 0xc1371b01  // fmla za.s[x8, 1], { z24.s-z27.s }, z7.s\n"
+    ".inst 0xc1361b02  // fmla za.s[x8, 2], { z24.s-z27.s }, z6.s\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c0c  // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+    ".inst 0xc1abc8ac  // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
+    "st1w { z12.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc13a1b21  // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+    "st1w { z13.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc1321b22  // fmla za.s[x8, 2], { z25.s-z28.s }, z2.s\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "add x8, x8, #0x1\n"
+    "ld1w { z26.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z14.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "ld1w { z27.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z15.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    ".inst 0xc0040e02  // mova za.d[x8, #2], { z16.d-z19.d }\n"
+    "ld1w { z28.s }, p1/Z, [x19]\n"
+    "bgt 9b\n"
+    "b 15f\n"
+    "10:"  // Padded
+    "cbz x9, 13f\n"
+    "cmp x9, #0x1\n"
+    "sub x14, x14, x9\n"
+    "beq 12f\n"
+    "11:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z23.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z26.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1301ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z27.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1361b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z6.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z28.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1321b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z2.s\n"
+    "12:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z23.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z26.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1381ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z8.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1301ae1  // fmla za.s[x8, 1], { z23.s-z26.s }, z0.s\n"
+    "ld1w { z27.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1371b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z7.s\n"
+    "ld1w { z28.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1361b01  // fmla za.s[x8, 1], { z24.s-z27.s }, z6.s\n"
+    ".inst 0xc13a1b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
+    ".inst 0xc1321b21  // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
+    "13:"  // Padded: 0 priming loads
+    "cbz x14, 16f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z23.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z26.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "sub x14, x14, #0x1\n"
+    "sub x11, x11, #0x1\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "cmp x14, x11\n"
+    "ld1w { z27.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z28.s }, p0/Z, [x19]\n"
+    "csel x20, x14, x11, LT\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "sub x11, x11, x20\n"
+    "cbz x20, 15f\n"
+    "14:"  // Padded: Main loop
+    ".inst 0xc1331ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1391b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc1381ae1  // fmla za.s[x8, 1], { z23.s-z26.s }, z8.s\n"
+    ".inst 0xc1301ae2  // fmla za.s[x8, 2], { z23.s-z26.s }, z0.s\n"
+    "ld1w { z23.s }, p0/Z, [x13]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1311b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z1.s\n"
+    ".inst 0xc1371b01  // fmla za.s[x8, 1], { z24.s-z27.s }, z7.s\n"
+    ".inst 0xc1361b02  // fmla za.s[x8, 2], { z24.s-z27.s }, z6.s\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc0060c0c  // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+    ".inst 0xc1abc8ac  // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
+    "st1w { z12.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc13a1b21  // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+    "st1w { z13.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc1321b22  // fmla za.s[x8, 2], { z25.s-z28.s }, z2.s\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z26.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z14.s }, p1, [x24]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x8, x8, #0x1\n"
+    "ld1w { z27.s }, p0/Z, [x19]\n"
+    "st1w { z15.s }, p1, [x23]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc0040e02  // mova za.d[x8, #2], { z16.d-z19.d }\n"
+    "ld1w { z28.s }, p0/Z, [x19]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 14b\n"
+    "15:"  // Main loop tail
+    ".inst 0xc1331ae0  // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
+    ".inst 0xc1391b00  // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
+    ".inst 0xc1381ae1  // fmla za.s[x8, 1], { z23.s-z26.s }, z8.s\n"
+    ".inst 0xc1301ae2  // fmla za.s[x8, 2], { z23.s-z26.s }, z0.s\n"
+    ".inst 0xc1311b20  // fmla za.s[x8, 0], { z25.s-z28.s }, z1.s\n"
+    ".inst 0xc1371b01  // fmla za.s[x8, 1], { z24.s-z27.s }, z7.s\n"
+    ".inst 0xc1361b02  // fmla za.s[x8, 2], { z24.s-z27.s }, z6.s\n"
+    ".inst 0xc0060c0c  // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+    ".inst 0xc1abc8ac  // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
+    "st1w { z12.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc13a1b21  // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+    "st1w { z13.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc1321b22  // fmla za.s[x8, 2], { z25.s-z28.s }, z2.s\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z14.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "st1w { z15.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    ".inst 0xc0040e02  // mova za.d[x8, #2], { z16.d-z19.d }\n"
+    "16:"  // Main loop skip tail
+    "cbz x11, 18f\n"
+    "17:"  // Right padding loop
+    ".inst 0xc0060c0c  // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "subs x11, x11, #0x1\n"
+    ".inst 0xc1abc8ac  // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
+    "st1w { z12.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040e02  // mova za.d[x8, #2], { z16.d-z19.d }\n"
+    "st1w { z13.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "st1w { z14.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "st1w { z15.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 17b\n"
+    "18:"  // End
+    "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x22, ALL, MUL #9\n"
+    "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x15\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x15, x16\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x13, x13, x19, LSL #2\n"
+    "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x10, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x0]\n"
+    "ldp x22, x21, [x10, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x10]\n"
+    "b.any 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
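
Note: the pad_bottom term in the planar kernel's Args initialiser above comes from the number of input rows consumed per four-row output block: (4 - 1) * 1 + 3 = 6 at stride 1, which is the "6u - std::min(6u, ...)" expression, while the stride-2 variant that follows consumes (4 - 1) * 2 + 3 = 9 rows and uses 9u. A small stand-alone sketch of that arithmetic, with illustrative names only and not taken from the patch:

// Sketch only: the pad_bottom arithmetic from the Args initialiser, factored out.
#include <algorithm>

constexpr unsigned int rows_needed(unsigned int output_rows, unsigned int stride_rows, unsigned int kernel_rows)
{
  return (output_rows - 1) * stride_rows + kernel_rows;
}

unsigned int pad_bottom(unsigned int needed, unsigned int pad_top, unsigned int valid_input_rows)
{
  // Whatever the top padding plus valid input rows cannot cover must be bottom padding.
  return needed - std::min(needed, pad_top + valid_input_rows);
}

// rows_needed(4, 1, 3) == 6 matches the stride-1 kernel above;
// rows_needed(4, 2, 3) == 9 matches the stride-2 kernel further below.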
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za.hpp
new file mode 100644
index 0000000..18a5729
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32_planar_3x3_s2_4rows_mla_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32_planar_3x3_s2_4rows_mla_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32_planar_3x3_s2_4rows_mla_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
new file mode 100644
index 0000000..289803c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x9\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x7\n"
+    "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ld1rw { z28.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x16\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z19.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x17\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z24.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z24.s }, p1/Z, [x19, x15, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x14, #0x1\n"
+    "orr x23, x19, %x[ld_in_col], LSL #18\n"
+    "mov z25.d, z24.d\n"
+    "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xa0404ac2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x22]\n"
+    "orr x23, x16, x23, LSL #20\n"
+    "mov x21, #0x9\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ld1w { z7.s }, p2/Z, [x22, #2, MUL VL]\n"
+    "addvl x22, x22, #3\n"
+    "add x20, x17, x7\n"
+    ".inst 0xa0404ac4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x22]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "mov z26.d, z24.d\n"
+    "mov z27.d, z24.d\n"
+    "ld1w { z6.s }, p2/Z, [x22, #2, MUL VL]\n"
+    "addvl x22, x22, #3\n"
+    "mov x8, #0x0\n"
+    "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+    ".inst 0xa1404ac1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x22]\n"
+    "lsl x23, x23, #0x2\n"
+    "sub x21, x21, x20\n"
+    "ld1w { z8.s }, p2/Z, [x22, #2, MUL VL]\n"
+    "madd x19, x19, x17, x13\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b74a7c  // rprfm pldstrm, x23, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x13, x17, x19, x13\n"
+    ".inst 0xc0040f00  // mova za.d[x8, #0], { z24.d-z27.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040f01  // mova za.d[x8, #1], { z24.d-z27.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x9, x28, [x10], #0x10\n"
+    ".inst 0xc0040f02  // mova za.d[x8, #2], { z24.d-z27.d }\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    "ldp x25, x24, [x10], #0x10\n"
+    "ldp x23, x22, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060c14  // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+    "and x21, x20, #0x1\n"
+    "add x20, x20, #0x1\n"
+    ".inst 0xc1b3cb94  // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+    "lsr x20, x20, #0x1\n"
+    "sub x11, x11, x20\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z20.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z21.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z22.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z23.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x17, x7\n"
+    "bne 10f\n"
+    "cbz x21, 8f\n"
+    "cmp x21, #0x1\n"
+    "sub x14, x14, x21\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 2 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z29.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z30.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z31.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321980  // fmla za.s[x8, 0], { z12.s-z15.s }, z2.s\n"
+    "ld1w { z0.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z4.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc13119a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z1.s\n"
+    "7:"  // Unpadded: 1 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z29.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z30.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z31.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1331980  // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+    "ld1w { z0.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1351ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc13919a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+    "8:"  // Unpadded: 0 priming loads
+    "cmp x14, #0x2\n"
+    "blt 16f\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x13]\n"
+    "sub x14, x14, #0x2\n"
+    "ld1w { z29.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, #0x1\n"
+    "ld1w { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "lsr x19, x14, #0x1\n"
+    "ld1w { z30.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "cmp x19, x11\n"
+    "ld1w { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "csel x21, x19, x11, LT\n"
+    "ld1w { z31.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "and x14, x14, #0x1\n"
+    "ld1w { z0.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, x21\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "cbz x21, 15f\n"
+    "9:"  // Unpadded: Main loop
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xc1321981  // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
+    "ld1w { z12.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1361ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
+    ".inst 0xc1341ba1  // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
+    "ld1w { z29.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0xc13119a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
+    "ld1w { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "ld1w { z30.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c14  // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "ld1w { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1b3cb94  // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+    "st1w { z20.s }, p1, [x9]\n"
+    "ld1w { z31.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z21.s }, p1, [x28]\n"
+    "ld1w { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1331980  // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+    "add x28, x28, x26, LSL #2\n"
+    "ld1w { z0.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1351ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+    "st1w { z22.s }, p1, [x25]\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0xc13919a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z23.s }, p1, [x24]\n"
+    "ld1w { z12.s }, p1/Z, [x13]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc0040f02  // mova za.d[x8, #2], { z24.d-z27.d }\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z29.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z30.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z31.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z0.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "bgt 9b\n"
+    "b 15f\n"
+    "10:"  // Padded
+    "cbz x21, 13f\n"
+    "cmp x21, #0x1\n"
+    "sub x14, x14, x21\n"
+    "beq 12f\n"
+    "11:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z29.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z30.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z31.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1321980  // fmla za.s[x8, 0], { z12.s-z15.s }, z2.s\n"
+    "ld1w { z0.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1341ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z4.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc13119a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z1.s\n"
+    "12:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z29.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z30.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z31.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1331980  // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+    "ld1w { z0.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1351ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc13919a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+    "13:"  // Padded: 0 priming loads
+    "cmp x14, #0x2\n"
+    "blt 16f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z29.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z13.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z30.s }, p0/Z, [x20]\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "sub x14, x14, #0x2\n"
+    "ld1w { z31.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x20]\n"
+    "sub x11, x11, #0x1\n"
+    "lsr x19, x14, #0x1\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z0.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "cmp x19, x11\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "csel x21, x19, x11, LT\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "and x14, x14, #0x1\n"
+    "sub x11, x11, x21\n"
+    "cbz x21, 15f\n"
+    "14:"  // Padded: Main loop
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1321981  // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1361ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341ba1  // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
+    "ld1w { z29.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xc13119a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
+    "ld1w { z13.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z30.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c14  // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1b3cb94  // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z31.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "st1w { z20.s }, p1, [x9]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x20]\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z21.s }, p1, [x28]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z0.s }, p0/Z, [x20]\n"
+    "st1w { z22.s }, p1, [x25]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1331980  // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "st1w { z23.s }, p1, [x24]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1351ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+    "mov x12, #0x0\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc13919a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+    "ld1w { z29.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc0040f02  // mova za.d[x8, #2], { z24.d-z27.d }\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z30.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z31.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z0.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "add x28, x28, x26, LSL #2\n"
+    "add x25, x25, x23, LSL #2\n"
+    "add x24, x24, x22, LSL #2\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "bgt 14b\n"
+    "15:"  // Main loop tail
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1321981  // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1361ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1341ba1  // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
+    "ld1w { z29.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0xc13119a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z30.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c14  // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1b3cb94  // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z31.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z20.s }, p1, [x9]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z21.s }, p1, [x28]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z0.s }, p0/Z, [x19]\n"
+    "st1w { z22.s }, p1, [x25]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1331980  // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z23.s }, p1, [x24]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1351ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "add x28, x28, x26, LSL #2\n"
+    "add x25, x25, x23, LSL #2\n"
+    ".inst 0xc0040f02  // mova za.d[x8, #2], { z24.d-z27.d }\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc13919a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+    "16:"  // Main loop skip tail
+    "cbz x14, 17f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z12.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z29.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z30.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z31.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "ld1w { z0.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1361ba0  // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc1321981  // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
+    "sub x11, x11, #0x1\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0xc1341ba1  // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
+    ".inst 0xc0060c14  // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+    ".inst 0xc1b3cb94  // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+    "st1w { z20.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc13119a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z21.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z22.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    ".inst 0xc0040f02  // mova za.d[x8, #2], { z24.d-z27.d }\n"
+    "st1w { z23.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "17:"  // Tail input: End
+    "cbz x11, 19f\n"
+    "18:"  // Right padding loop
+    ".inst 0xc0060c14  // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "subs x11, x11, #0x1\n"
+    ".inst 0xc1b3cb94  // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+    "st1w { z20.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc0040f02  // mova za.d[x8, #2], { z24.d-z27.d }\n"
+    "st1w { z21.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z22.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z23.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "bgt 18b\n"
+    "19:"  // End
+    "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x22, ALL, MUL #9\n"
+    "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x15\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x15, x16\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x13, x13, x19, LSL #2\n"
+    "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x10, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x0]\n"
+    "ldp x22, x21, [x10, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x10]\n"
+    "b.any 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za.hpp
new file mode 100644
index 0000000..0fa0300
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32_planar_5x5_s1_4rows_mla_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32_planar_5x5_s1_4rows_mla_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32_planar_5x5_s1_4rows_mla_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
new file mode 100644
index 0000000..0753e2d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
@@ -0,0 +1,883 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x8\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x6\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ld1rw { z22.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z11.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x7\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z28.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z28.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #18\n"
+    "mov z29.d, z28.d\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "orr x22, x17, x22, LSL #20\n"
+    "mov x21, #0x8\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "add x20, x7, x6\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "mov z30.d, z28.d\n"
+    "mov z31.d, z28.d\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "mov x8, #0x0\n"
+    "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x22, x22, #0x2\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x7, x13\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x13, x7, x19, x13\n"
+    ".inst 0xc0040f80  // mova za.d[x8, #0], { z28.d-z31.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040f81  // mova za.d[x8, #1], { z28.d-z31.d }\n"
+    "mov x9, #0x4\n"
+    "ldp x28, x27, [x10], #0x10\n"
+    ".inst 0xc0040f82  // mova za.d[x8, #2], { z28.d-z31.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    ".inst 0xc0040f83  // mova za.d[x8, #3], { z28.d-z31.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ldp x24, x23, [x10], #0x10\n"
+    "ldp x22, x21, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x9\n"
+    "csel x19, x20, x9, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x9, x9, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "sub x11, x11, x20\n"
+    ".inst 0xc1abcad8  // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z25.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "st1w { z26.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x7, x6\n"
+    "bne 12f\n"
+    "cbz x9, 10f\n"
+    "cmp x9, #0x1\n"
+    "sub x15, x15, x9\n"
+    "beq 9f\n"
+    "cmp x9, #0x2\n"
+    "beq 8f\n"
+    "cmp x9, #0x3\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 4 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13419c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z4.s\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13019e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z0.s\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1341a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z4.s\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1301a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z0.s\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1341a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "7:"  // Unpadded: 3 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13519c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z5.s\n"
+    ".inst 0xc13419c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z4.s\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13819e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z8.s\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13019e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1351a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z5.s\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1381a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z8.s\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    ".inst 0xc1301a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1351a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z5.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1341a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13219c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z2.s\n"
+    ".inst 0xc13519c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z5.s\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13419c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13619e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13819e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z8.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13019e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z0.s\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1351a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1341a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    ".inst 0xc1361a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1381a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1301a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xc1321a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z5.s\n"
+    ".inst 0xc1341a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13319c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z3.s\n"
+    ".inst 0xc13219c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z2.s\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13519c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z5.s\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13419c3  // fmla za.s[x8, 3], { z14.s-z17.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13719e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z7.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13619e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z6.s\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13819e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13019e3  // fmla za.s[x8, 3], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1331a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z3.s\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    ".inst 0xc1321a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z5.s\n"
+    ".inst 0xc1341a03  // fmla za.s[x8, 3], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1371a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1361a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xc1381a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xc1301a23  // fmla za.s[x8, 3], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xc1331a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z3.s\n"
+    ".inst 0xc1321a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z5.s\n"
+    ".inst 0xc1341a43  // fmla za.s[x8, 3], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "10:"  // Unpadded: 0 priming loads
+    "cbz x15, 20f\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x13]\n"
+    "sub x15, x15, #0x1\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "cmp x15, x11\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "csel x20, x15, x11, LT\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, x20\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "cbz x20, 19f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc13a19c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc13119e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z1.s\n"
+    ".inst 0xc13319c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z3.s\n"
+    ".inst 0xc13219c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13519c3  // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
+    ".inst 0xc13419c4  // fmla za.s[x8, 4], { z14.s-z17.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc13719e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z7.s\n"
+    "ld1w { z14.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc13619e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13819e3  // fmla za.s[x8, 3], { z15.s-z18.s }, z8.s\n"
+    ".inst 0xc13019e4  // fmla za.s[x8, 4], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1311a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1331a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a03  // fmla za.s[x8, 3], { z16.s-z19.s }, z5.s\n"
+    ".inst 0xc1341a04  // fmla za.s[x8, 4], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13a1a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1371a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1361a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xc1381a23  // fmla za.s[x8, 3], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xc1301a24  // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xc1abcad8  // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc1331a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
+    "st1w { z25.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc1321a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "st1w { z26.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc1351a43  // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    ".inst 0xc1341a44  // fmla za.s[x8, 4], { z18.s-z21.s }, z4.s\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "add x8, x8, #0x1\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "bgt 11b\n"
+    "b 19f\n"
+    "12:"  // Padded
+    "cbz x9, 17f\n"
+    "cmp x9, #0x1\n"
+    "sub x15, x15, x9\n"
+    "beq 16f\n"
+    "cmp x9, #0x2\n"
+    "beq 15f\n"
+    "cmp x9, #0x3\n"
+    "beq 14f\n"
+    "13:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13419c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13019e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z0.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z4.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1301a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z0.s\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1341a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "14:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13519c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z5.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13419c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z4.s\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc13819e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z8.s\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13019e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1351a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z5.s\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1341a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1381a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z8.s\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    ".inst 0xc1301a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1351a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z5.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1341a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "15:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13219c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13519c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z5.s\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13419c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13619e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13819e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z8.s\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc13019e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1321a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1351a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z5.s\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    ".inst 0xc1341a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1361a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1381a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xc1301a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xc1321a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z5.s\n"
+    ".inst 0xc1341a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "16:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13319c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z3.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13219c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z2.s\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13519c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z5.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13419c3  // fmla za.s[x8, 3], { z14.s-z17.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc13719e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z7.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13619e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z6.s\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc13819e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13019e3  // fmla za.s[x8, 3], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1331a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z3.s\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    ".inst 0xc1321a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z5.s\n"
+    ".inst 0xc1341a03  // fmla za.s[x8, 3], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1371a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1361a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xc1381a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xc1301a23  // fmla za.s[x8, 3], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xc1331a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z3.s\n"
+    ".inst 0xc1321a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z5.s\n"
+    ".inst 0xc1341a43  // fmla za.s[x8, 3], { z18.s-z21.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "17:"  // Padded: 0 priming loads
+    "cbz x15, 20f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z14.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "sub x15, x15, #0x1\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "sub x11, x11, #0x1\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "cmp x15, x11\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "csel x20, x15, x11, LT\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "sub x11, x11, x20\n"
+    "cbz x20, 19f\n"
+    "18:"  // Padded: Main loop
+    ".inst 0xc13a19c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13119e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z1.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc13319c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z3.s\n"
+    ".inst 0xc13219c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13519c3  // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
+    ".inst 0xc13419c4  // fmla za.s[x8, 4], { z14.s-z17.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc13719e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z7.s\n"
+    "ld1w { z14.s }, p0/Z, [x13]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc13619e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13819e3  // fmla za.s[x8, 3], { z15.s-z18.s }, z8.s\n"
+    ".inst 0xc13019e4  // fmla za.s[x8, 4], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1311a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1331a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1321a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a03  // fmla za.s[x8, 3], { z16.s-z19.s }, z5.s\n"
+    ".inst 0xc1341a04  // fmla za.s[x8, 4], { z16.s-z19.s }, z4.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    ".inst 0xc13a1a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1371a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1361a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xc1381a23  // fmla za.s[x8, 3], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xc1301a24  // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xc1abcad8  // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc1331a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
+    "st1w { z25.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc1321a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "st1w { z26.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc1351a43  // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    ".inst 0xc1341a44  // fmla za.s[x8, 4], { z18.s-z21.s }, z4.s\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "bgt 18b\n"
+    "19:"  // Main loop tail
+    ".inst 0xc13a19c0  // fmla za.s[x8, 0], { z14.s-z17.s }, z10.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc13119e0  // fmla za.s[x8, 0], { z15.s-z18.s }, z1.s\n"
+    ".inst 0xc13319c1  // fmla za.s[x8, 1], { z14.s-z17.s }, z3.s\n"
+    ".inst 0xc13219c2  // fmla za.s[x8, 2], { z14.s-z17.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13519c3  // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
+    ".inst 0xc13419c4  // fmla za.s[x8, 4], { z14.s-z17.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1a00  // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
+    "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc13719e1  // fmla za.s[x8, 1], { z15.s-z18.s }, z7.s\n"
+    ".inst 0xc13619e2  // fmla za.s[x8, 2], { z15.s-z18.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13819e3  // fmla za.s[x8, 3], { z15.s-z18.s }, z8.s\n"
+    ".inst 0xc13019e4  // fmla za.s[x8, 4], { z15.s-z18.s }, z0.s\n"
+    ".inst 0xa14049c0  // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1311a20  // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
+    "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1331a01  // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
+    ".inst 0xc1321a02  // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
+    ".inst 0xa04149c2  // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1351a03  // fmla za.s[x8, 3], { z16.s-z19.s }, z5.s\n"
+    ".inst 0xc1341a04  // fmla za.s[x8, 4], { z16.s-z19.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13a1a40  // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
+    ".inst 0xc1371a21  // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
+    ".inst 0xc1361a22  // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
+    ".inst 0xc1381a23  // fmla za.s[x8, 3], { z17.s-z20.s }, z8.s\n"
+    ".inst 0xc1301a24  // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xc1abcad8  // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc1331a41  // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
+    "st1w { z25.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc1321a42  // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
+    "st1w { z26.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc1351a43  // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    ".inst 0xc1341a44  // fmla za.s[x8, 4], { z18.s-z21.s }, z4.s\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "20:"  // Main loop skip tail
+    "cbz x11, 22f\n"
+    "21:"  // Right padding loop
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "subs x11, x11, #0x1\n"
+    ".inst 0xc1abcad8  // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "st1w { z25.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "st1w { z26.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 21b\n"
+    "22:"  // End
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x14, ALL, MUL #16\n"
+    "incb x14, ALL, MUL #9\n"
+    "str x14, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x16\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x13, x13, x19, LSL #2\n"
+    "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x10, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x0]\n"
+    "ldp x22, x21, [x10, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x10]\n"
+    "b.any 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za.hpp
new file mode 100644
index 0000000..cae4b24
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32_planar_5x5_s2_4rows_mla_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32_planar_5x5_s2_4rows_mla_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32_planar_5x5_s2_4rows_mla_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
new file mode 100644
index 0000000..8920b3b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
@@ -0,0 +1,1172 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0xb\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x6\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ld1rw { z0.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z17.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x7\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z28.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z28.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #18\n"
+    "mov z29.d, z28.d\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "orr x22, x17, x22, LSL #20\n"
+    "mov x21, #0xb\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "add x20, x7, x6\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "mov z30.d, z28.d\n"
+    "mov z31.d, z28.d\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "mov x8, #0x0\n"
+    "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x22, x22, #0x2\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x7, x13\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x13, x7, x19, x13\n"
+    ".inst 0xc0040f80  // mova za.d[x8, #0], { z28.d-z31.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040f81  // mova za.d[x8, #1], { z28.d-z31.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x9, x28, [x10], #0x10\n"
+    ".inst 0xc0040f82  // mova za.d[x8, #2], { z28.d-z31.d }\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    ".inst 0xc0040f83  // mova za.d[x8, #3], { z28.d-z31.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ldp x25, x24, [x10], #0x10\n"
+    "ldp x23, x22, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "and x21, x20, #0x1\n"
+    "add x20, x20, #0x1\n"
+    ".inst 0xc1b1c818  // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+    "lsr x20, x20, #0x1\n"
+    "sub x11, x11, x20\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z24.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z25.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z27.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x7, x6\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "cmp x21, #0x2\n"
+    "beq 8f\n"
+    "cmp x21, #0x3\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 4 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341960  // fmla za.s[x8, 0], { z11.s-z14.s }, z4.s\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1311aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z1.s\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1341980  // fmla za.s[x8, 0], { z12.s-z15.s }, z4.s\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1311ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13419a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "7:"  // Unpadded: 3 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1351960  // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z9.s\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1351980  // fmla za.s[x8, 0], { z12.s-z15.s }, z5.s\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1391ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13519a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1361960  // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
+    ".inst 0xc1341961  // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z2.s\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1311aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z1.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1361980  // fmla za.s[x8, 0], { z12.s-z15.s }, z6.s\n"
+    ".inst 0xc1341981  // fmla za.s[x8, 1], { z12.s-z15.s }, z4.s\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1311ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z1.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1321ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z2.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13419a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z4.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13619a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z6.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1371960  // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+    ".inst 0xc1351961  // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13a1aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    ".inst 0xc1351981  // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1391ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13519a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13719a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "10:"  // Unpadded: 0 priming loads
+    "cmp x15, #0x2\n"
+    "blt 20f\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "sub x15, x15, #0x2\n"
+    "ld1w { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, #0x1\n"
+    "ld1w { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "lsr x19, x15, #0x1\n"
+    "ld1w { z22.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "cmp x19, x11\n"
+    "ld1w { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "csel x21, x19, x11, LT\n"
+    "ld1w { z23.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "and x15, x15, #0x1\n"
+    "ld1w { z24.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, x21\n"
+    "ld1w { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "ld1w { z25.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "cbz x21, 19f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc1381960  // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xc1361961  // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1341962  // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1331aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1321aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1311aa2  // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1381980  // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1361981  // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1341982  // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1331ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
+    "ld1w { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1311ac2  // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
+    "ld1w { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0xc13619a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13419a2  // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
+    "ld1w { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1b1c818  // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+    "st1w { z24.s }, p1, [x9]\n"
+    "ld1w { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1371960  // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc1351961  // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "st1w { z25.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "st1w { z26.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "ld1w { z24.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13a1aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+    "st1w { z27.s }, p1, [x24]\n"
+    ".inst 0xc1391aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
+    "ld1w { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1351981  // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "ld1w { z25.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1391ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13519a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13719a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    "ld1w { z11.s }, p1/Z, [x13]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z24.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z25.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "bgt 11b\n"
+    "b 19f\n"
+    "12:"  // Padded
+    "cbz x21, 17f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 16f\n"
+    "cmp x21, #0x2\n"
+    "beq 15f\n"
+    "cmp x21, #0x3\n"
+    "beq 14f\n"
+    "13:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1341960  // fmla za.s[x8, 0], { z11.s-z14.s }, z4.s\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1311aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z1.s\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1341980  // fmla za.s[x8, 0], { z12.s-z15.s }, z4.s\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1311ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13419a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    "14:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1351960  // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1391aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z9.s\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1351980  // fmla za.s[x8, 0], { z12.s-z15.s }, z5.s\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1391ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13519a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "15:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1361960  // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    ".inst 0xc1341961  // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1321aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z2.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1311aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z1.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1361980  // fmla za.s[x8, 0], { z12.s-z15.s }, z6.s\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1341981  // fmla za.s[x8, 1], { z12.s-z15.s }, z4.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1321ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z2.s\n"
+    ".inst 0xc1311ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z1.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13419a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z4.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13619a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z6.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "16:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1371960  // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    ".inst 0xc1351961  // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc13a1aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1391aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1351981  // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
+    ".inst 0xc1391ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13519a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13719a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "17:"  // Padded: 0 priming loads
+    "cmp x15, #0x2\n"
+    "blt 20f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z22.s }, p0/Z, [x20]\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z24.s }, p0/Z, [x20]\n"
+    "sub x15, x15, #0x2\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "sub x11, x11, #0x1\n"
+    "ld1w { z15.s }, p0/Z, [x20]\n"
+    "lsr x19, x15, #0x1\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "cmp x19, x11\n"
+    "ld1w { z25.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "csel x21, x19, x11, LT\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "and x15, x15, #0x1\n"
+    "sub x11, x11, x21\n"
+    "cbz x21, 19f\n"
+    "18:"  // Padded: Main loop
+    ".inst 0xc1381960  // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1361961  // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xc1341962  // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1331aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1321aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1311aa2  // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1381980  // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1361981  // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341982  // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1331ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1321ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
+    "ld1w { z21.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1311ac2  // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
+    "ld1w { z12.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z22.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13619a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13419a2  // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
+    "ld1w { z13.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x8, x8, #0x1\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1b1c818  // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+    "ld1w { z14.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "st1w { z24.s }, p1, [x9]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1371960  // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc1351961  // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "st1w { z25.s }, p1, [x28]\n"
+    "ld1w { z24.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13a1aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+    ".inst 0xc1391aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
+    "ld1w { z15.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1351981  // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x25]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "add x25, x25, x23, LSL #2\n"
+    "ld1w { z25.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "st1w { z27.s }, p1, [x24]\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc1391ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13519a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13719a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "bgt 18b\n"
+    "19:"  // Main loop tail
+    ".inst 0xc1381960  // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1361961  // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1341962  // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1331aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1321aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1311aa2  // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1381980  // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1361981  // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc1341982  // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1331ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc1321ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1311ac2  // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13619a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc13419a2  // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "add x8, x8, #0x1\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1b1c818  // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "st1w { z24.s }, p1, [x9]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1371960  // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc1351961  // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "st1w { z25.s }, p1, [x28]\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc13a1aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+    ".inst 0xc1391aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1351981  // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x25]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1371980  // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+    "add x25, x25, x23, LSL #2\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "st1w { z27.s }, p1, [x24]\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1391ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc13a1ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc13519a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    ".inst 0xc13719a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    "20:"  // Main loop skip tail
+    "cbz x15, 21f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z11.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z12.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z22.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z13.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z23.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z14.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1381960  // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
+    "ld1w { z24.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1331aa0  // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
+    "ld1w { z15.s }, p0/Z, [x19]\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1361961  // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1341962  // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    "sub x11, x11, #0x1\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1381980  // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
+    "ld1w { z25.s }, p0/Z, [x19]\n"
+    ".inst 0xc1321aa1  // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc1311aa2  // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
+    ".inst 0xa14049c1  // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1331ac0  // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
+    ".inst 0xa14149c2  // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    "addvl x14, x14, #5\n"
+    ".inst 0xc1361981  // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc1341982  // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
+    "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+    ".inst 0xc13819a0  // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+    ".inst 0xc1321ac1  // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
+    ".inst 0xa04049c4  // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+    ".inst 0xc1311ac2  // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
+    ".inst 0xa04149c6  // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xc1b1c818  // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+    "st1w { z24.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc13619a1  // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
+    "st1w { z25.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc13419a2  // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z26.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z27.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "21:"  // Tail input: End
+    "cbz x11, 23f\n"
+    "22:"  // Right padding loop
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "subs x11, x11, #0x1\n"
+    ".inst 0xc1b1c818  // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+    "st1w { z24.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "st1w { z25.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z27.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "bgt 22b\n"
+    "23:"  // End
+    "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x14, ALL, MUL #16\n"
+    "incb x14, ALL, MUL #9\n"
+    "str x14, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x16\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x13, x13, x19, LSL #2\n"
+    "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x10, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x0]\n"
+    "ldp x22, x21, [x10, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x10]\n"
+    "b.any 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..f09c616
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..e6c0cb7
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x6\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x7\n"
+    "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    "ld1rw { z28.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x16\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z29.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x17\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z22.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z22.s }, p1/Z, [x19, x15, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x20, x19\n"
+    "fmov z9.s, #0x0\n"
+    "ld1w { z25.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    "incb x19\n"
+    "ld1w { z27.s }, p2/Z, [x20]\n"
+    ".inst 0x648aab29  // bfcvtnt z9.h, p2/M, z25.s\n"
+    "incb x20, ALL, MUL #3\n"
+    "ld1w { z21.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x658aab28  // bfcvt z8.h, p2/M, z25.s\n"
+    "ld1w { z25.s }, p2/Z, [x20]\n"
+    ".inst 0x658aab66  // bfcvt z6.h, p2/M, z27.s\n"
+    "fmov z2.s, #0x0\n"
+    "incb x20, ALL, MUL #3\n"
+    ".inst 0x658aab21  // bfcvt z1.h, p2/M, z25.s\n"
+    ".inst 0x648aab68  // bfcvtnt z8.h, p2/M, z27.s\n"
+    "incb x19\n"
+    "ld1w { z27.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    ".inst 0x648aaaa6  // bfcvtnt z6.h, p2/M, z21.s\n"
+    ".inst 0x658aaaa5  // bfcvt z5.h, p2/M, z21.s\n"
+    "ld1w { z21.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x648aab22  // bfcvtnt z2.h, p2/M, z25.s\n"
+    "ld1w { z25.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    ".inst 0x648aab61  // bfcvtnt z1.h, p2/M, z27.s\n"
+    ".inst 0x658aab6c  // bfcvt z12.h, p2/M, z27.s\n"
+    "ld1w { z27.s }, p2/Z, [x20]\n"
+    "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "incb x20, ALL, MUL #3\n"
+    "fmov z7.s, #0x0\n"
+    ".inst 0x658aab24  // bfcvt z4.h, p2/M, z25.s\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    ".inst 0x658aab60  // bfcvt z0.h, p2/M, z27.s\n"
+    ".inst 0x648aaaac  // bfcvtnt z12.h, p2/M, z21.s\n"
+    "sub x19, x14, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #18\n"
+    ".inst 0x658aaaaa  // bfcvt z10.h, p2/M, z21.s\n"
+    "ld1w { z21.s }, p2/Z, [x20]\n"
+    "orr x22, x16, x22, LSL #20\n"
+    "mov x21, #0x6\n"
+    "add x20, x17, x7\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "mov z23.d, z22.d\n"
+    ".inst 0x648aab27  // bfcvtnt z7.h, p2/M, z25.s\n"
+    ".inst 0x648aab64  // bfcvtnt z4.h, p2/M, z27.s\n"
+    ".inst 0x648aaaa0  // bfcvtnt z0.h, p2/M, z21.s\n"
+    "mov x8, #0x0\n"
+    "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+    ".inst 0x658aaaa3  // bfcvt z3.h, p2/M, z21.s\n"
+    "lsl x22, x22, #0x2\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x17, x13\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x13, x17, x19, x13\n"
+    ".inst 0xc0040ac0  // mova za.d[x8, #0], { z22.d-z23.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040ac1  // mova za.d[x8, #1], { z22.d-z23.d }\n"
+    "mov x9, #0x2\n"
+    "ldp x28, x27, [x10], #0x10\n"
+    ".inst 0xc0040ac2  // mova za.d[x8, #2], { z22.d-z23.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    ".inst 0xc0040ac3  // mova za.d[x8, #3], { z22.d-z23.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040ac4  // mova za.d[x8, #4], { z22.d-z23.d }\n"
+    "ldp x24, x23, [x10], #0x10\n"
+    ".inst 0xc0040ac5  // mova za.d[x8, #5], { z22.d-z23.d }\n"
+    "ldp x22, x21, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x9\n"
+    "csel x19, x20, x9, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x9, x9, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060818  // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+    "sub x11, x11, x20\n"
+    ".inst 0xc006083a  // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+    ".inst 0xc1bdcb98  // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "st1w { z25.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x17, x7\n"
+    "bne 10f\n"
+    "cbz x9, 8f\n"
+    "cmp x9, #0x1\n"
+    "sub x14, x14, x9\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 2 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc12811b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z8.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12911b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z9.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12511d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z5.h\n"
+    ".inst 0xc12611d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z6.h\n"
+    "7:"  // Unpadded: 1 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc12111b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12211b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z2.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12811b2  // bfdot za.s[x8, 2], { z13.h-z14.h }, z8.h\n"
+    ".inst 0xc12911b3  // bfdot za.s[x8, 3], { z13.h-z14.h }, z9.h\n"
+    ".inst 0xc12a11d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z10.h\n"
+    ".inst 0xc12c11d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z12.h\n"
+    ".inst 0xc12511d2  // bfdot za.s[x8, 2], { z14.h-z15.h }, z5.h\n"
+    ".inst 0xc12611d3  // bfdot za.s[x8, 3], { z14.h-z15.h }, z6.h\n"
+    "8:"  // Unpadded: 0 priming loads
+    "cbz x14, 16f\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "sub x14, x14, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, #0x1\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "cmp x14, x11\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "csel x20, x14, x11, LT\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    "sub x11, x11, x20\n"
+    "cbz x20, 15f\n"
+    "9:"  // Unpadded: Main loop
+    ".inst 0xc12411b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z4.h\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z21.s }, p1/Z, [x13]\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc12711b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12311d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc12011d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z0.h\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12111b2  // bfdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12211b3  // bfdot za.s[x8, 3], { z13.h-z14.h }, z2.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc12811b4  // bfdot za.s[x8, 4], { z13.h-z14.h }, z8.h\n"
+    ".inst 0xc12911b5  // bfdot za.s[x8, 5], { z13.h-z14.h }, z9.h\n"
+    ".inst 0x658aaaad  // bfcvt z13.h, p2/M, z21.s\n"
+    ".inst 0x648aaa8d  // bfcvtnt z13.h, p2/M, z20.s\n"
+    ".inst 0xc12a11d2  // bfdot za.s[x8, 2], { z14.h-z15.h }, z10.h\n"
+    ".inst 0xc12c11d3  // bfdot za.s[x8, 3], { z14.h-z15.h }, z12.h\n"
+    ".inst 0xc12511d4  // bfdot za.s[x8, 4], { z14.h-z15.h }, z5.h\n"
+    ".inst 0xc12611d5  // bfdot za.s[x8, 5], { z14.h-z15.h }, z6.h\n"
+    ".inst 0x658aaa6e  // bfcvt z14.h, p2/M, z19.s\n"
+    ".inst 0x658aaa2f  // bfcvt z15.h, p2/M, z17.s\n"
+    ".inst 0xc0060818  // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+    ".inst 0x648aaa4e  // bfcvtnt z14.h, p2/M, z18.s\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc006083a  // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc1bdcb98  // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc0040ac4  // mova za.d[x8, #4], { z22.d-z23.d }\n"
+    "st1w { z25.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc0040ac5  // mova za.d[x8, #5], { z22.d-z23.d }\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 9b\n"
+    "b 15f\n"
+    "10:"  // Padded
+    "cbz x9, 13f\n"
+    "cmp x9, #0x1\n"
+    "sub x14, x14, x9\n"
+    "beq 12f\n"
+    "11:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12811b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z8.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12911b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z9.h\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc12511d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z5.h\n"
+    ".inst 0xc12611d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z6.h\n"
+    "12:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12111b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12211b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z2.h\n"
+    ".inst 0xc12811b2  // bfdot za.s[x8, 2], { z13.h-z14.h }, z8.h\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc12911b3  // bfdot za.s[x8, 3], { z13.h-z14.h }, z9.h\n"
+    ".inst 0xc12a11d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z10.h\n"
+    ".inst 0xc12c11d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z12.h\n"
+    ".inst 0xc12511d2  // bfdot za.s[x8, 2], { z14.h-z15.h }, z5.h\n"
+    ".inst 0xc12611d3  // bfdot za.s[x8, 3], { z14.h-z15.h }, z6.h\n"
+    "13:"  // Padded: 0 priming loads
+    "cbz x14, 16f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "sub x14, x14, #0x1\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    "sub x11, x11, #0x1\n"
+    "cmp x14, x11\n"
+    "csel x20, x14, x11, LT\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "sub x11, x11, x20\n"
+    "cbz x20, 15f\n"
+    "14:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0xc12411b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z4.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z21.s }, p0/Z, [x13]\n"
+    ".inst 0xc12711b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z7.h\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    ".inst 0xc12311d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc12011d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z0.h\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    ".inst 0xc12111b2  // bfdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12211b3  // bfdot za.s[x8, 3], { z13.h-z14.h }, z2.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12811b4  // bfdot za.s[x8, 4], { z13.h-z14.h }, z8.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc12911b5  // bfdot za.s[x8, 5], { z13.h-z14.h }, z9.h\n"
+    ".inst 0x658aaaad  // bfcvt z13.h, p2/M, z21.s\n"
+    ".inst 0x648aaa8d  // bfcvtnt z13.h, p2/M, z20.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc12a11d2  // bfdot za.s[x8, 2], { z14.h-z15.h }, z10.h\n"
+    ".inst 0xc12c11d3  // bfdot za.s[x8, 3], { z14.h-z15.h }, z12.h\n"
+    ".inst 0xc12511d4  // bfdot za.s[x8, 4], { z14.h-z15.h }, z5.h\n"
+    ".inst 0xc12611d5  // bfdot za.s[x8, 5], { z14.h-z15.h }, z6.h\n"
+    ".inst 0x658aaa6e  // bfcvt z14.h, p2/M, z19.s\n"
+    ".inst 0x658aaa2f  // bfcvt z15.h, p2/M, z17.s\n"
+    ".inst 0xc0060818  // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+    ".inst 0x648aaa4e  // bfcvtnt z14.h, p2/M, z18.s\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc006083a  // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc1bdcb98  // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc0040ac4  // mova za.d[x8, #4], { z22.d-z23.d }\n"
+    "st1w { z25.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc0040ac5  // mova za.d[x8, #5], { z22.d-z23.d }\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 14b\n"
+    "15:"  // Main loop tail
+    ".inst 0xc12411b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z4.h\n"
+    ".inst 0xc12711b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12311d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc12011d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z0.h\n"
+    ".inst 0xc12111b2  // bfdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc12211b3  // bfdot za.s[x8, 3], { z13.h-z14.h }, z2.h\n"
+    ".inst 0xc12811b4  // bfdot za.s[x8, 4], { z13.h-z14.h }, z8.h\n"
+    ".inst 0xc12911b5  // bfdot za.s[x8, 5], { z13.h-z14.h }, z9.h\n"
+    ".inst 0xc0060818  // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+    ".inst 0xc006083a  // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+    ".inst 0xc1bdcb98  // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc12a11d2  // bfdot za.s[x8, 2], { z14.h-z15.h }, z10.h\n"
+    "st1w { z26.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc12c11d3  // bfdot za.s[x8, 3], { z14.h-z15.h }, z12.h\n"
+    "st1w { z25.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc12511d4  // bfdot za.s[x8, 4], { z14.h-z15.h }, z5.h\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    ".inst 0xc12611d5  // bfdot za.s[x8, 5], { z14.h-z15.h }, z6.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc0040ac4  // mova za.d[x8, #4], { z22.d-z23.d }\n"
+    ".inst 0xc0040ac5  // mova za.d[x8, #5], { z22.d-z23.d }\n"
+    "16:"  // Main loop skip tail
+    "cbz x11, 18f\n"
+    "17:"  // Right padding loop
+    ".inst 0xc0060818  // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+    "subs x11, x11, #0x1\n"
+    ".inst 0xc006083a  // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc1bdcb98  // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
+    "st1w { z24.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z26.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    ".inst 0xc0040ac4  // mova za.d[x8, #4], { z22.d-z23.d }\n"
+    "st1w { z25.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0xc0040ac5  // mova za.d[x8, #5], { z22.d-z23.d }\n"
+    "st1w { z27.s }, p1, [x23]\n"
+    "add x23, x23, x21, LSL #2\n"
+    "bgt 17b\n"
+    "18:"  // End
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x19, ALL, MUL #9\n"
+    "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x15\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x15, x16\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x13, x13, x19, LSL #2\n"
+    "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x10, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x0]\n"
+    "ldp x22, x21, [x10, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x10]\n"
+    "b.any 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..89b9199
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..253f0da
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,763 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x9\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x7\n"
+    "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    "ld1rw { z27.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x16\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z23.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x17\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z4.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z4.s }, p1/Z, [x19, x15, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x20, x19\n"
+    "ld1w { z19.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    "incb x19\n"
+    "ld1w { z24.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    ".inst 0x658aaa69  // bfcvt z9.h, p2/M, z19.s\n"
+    "ld1w { z12.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x648aab09  // bfcvtnt z9.h, p2/M, z24.s\n"
+    "incb x19\n"
+    "ld1w { z19.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    ".inst 0x658aa983  // bfcvt z3.h, p2/M, z12.s\n"
+    ".inst 0x658aaa62  // bfcvt z2.h, p2/M, z19.s\n"
+    "ld1w { z24.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+    ".inst 0x648aab02  // bfcvtnt z2.h, p2/M, z24.s\n"
+    "ld1w { z12.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x658aa980  // bfcvt z0.h, p2/M, z12.s\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ld1w { z19.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    ".inst 0x658aaa6a  // bfcvt z10.h, p2/M, z19.s\n"
+    "sub x19, x14, #0x1\n"
+    "ld1w { z24.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #3\n"
+    "orr x22, x19, %x[ld_in_col], LSL #18\n"
+    "mov z5.d, z4.d\n"
+    "ld1w { z12.s }, p2/Z, [x20]\n"
+    "orr x22, x16, x22, LSL #20\n"
+    "mov x21, #0x9\n"
+    "mov z6.d, z4.d\n"
+    "add x20, x17, x7\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "mov z7.d, z4.d\n"
+    ".inst 0x648aab0a  // bfcvtnt z10.h, p2/M, z24.s\n"
+    ".inst 0x658aa981  // bfcvt z1.h, p2/M, z12.s\n"
+    "mov x8, #0x0\n"
+    "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x22, x22, #0x2\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x17, x13\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x13, x17, x19, x13\n"
+    ".inst 0xc0040c80  // mova za.d[x8, #0], { z4.d-z7.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040c81  // mova za.d[x8, #1], { z4.d-z7.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x9, x28, [x10], #0x10\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    "ldp x25, x24, [x10], #0x10\n"
+    "ldp x23, x22, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060c1c  // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+    "and x21, x20, #0x1\n"
+    "add x20, x20, #0x1\n"
+    ".inst 0xc1b7cb7c  // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "lsr x20, x20, #0x1\n"
+    "sub x11, x11, x20\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z28.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z29.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z30.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z31.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x17, x7\n"
+    "bne 10f\n"
+    "cbz x21, 8f\n"
+    "cmp x21, #0x1\n"
+    "sub x14, x14, x21\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 2 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaace  // bfcvt z14.h, p2/M, z22.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1331190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z3.h\n"
+    "7:"  // Unpadded: 1 priming loads
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1321170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1301190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+    "8:"  // Unpadded: 0 priming loads
+    "cmp x14, #0x2\n"
+    "blt 16f\n"
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "sub x14, x14, #0x2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "sub x11, x11, #0x1\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "lsr x19, x14, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "cmp x19, x11\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "csel x21, x19, x11, LT\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "and x14, x14, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "sub x11, x11, x21\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "cbz x21, 15f\n"
+    "9:"  // Unpadded: Main loop
+    "add x20, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0xc13a1170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "ld1w { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1311190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    "ld1w { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1331191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x648aaa4b  // bfcvtnt z11.h, p2/M, z18.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa2c  // bfcvtnt z12.h, p2/M, z17.s\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xc0060c1c  // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x8, x8, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x13]\n"
+    ".inst 0xc1321170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "subs x21, x21, #0x1\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1b7cb7c  // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1w { z28.s }, p1, [x9]\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1301190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+    "add x9, x9, x27, LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "st1w { z29.s }, p1, [x28]\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z30.s }, p1, [x25]\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x25, x25, x23, LSL #2\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z31.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa8b  // bfcvtnt z11.h, p2/M, z20.s\n"
+    ".inst 0x648aaa6c  // bfcvtnt z12.h, p2/M, z19.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x648aaa4d  // bfcvtnt z13.h, p2/M, z18.s\n"
+    ".inst 0x648aaa2e  // bfcvtnt z14.h, p2/M, z17.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "bgt 9b\n"
+    "b 15f\n"
+    "10:"  // Padded
+    "cbz x21, 13f\n"
+    "cmp x21, #0x1\n"
+    "sub x14, x14, x21\n"
+    "beq 12f\n"
+    "11:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1331190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z3.h\n"
+    "12:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1321170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1301190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+    "13:"  // Padded: 0 priming loads
+    "cmp x14, #0x2\n"
+    "blt 16f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "sub x14, x14, #0x2\n"
+    "sub x11, x11, #0x1\n"
+    "lsr x19, x14, #0x1\n"
+    "cmp x19, x11\n"
+    "csel x20, x19, x11, LT\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "and x14, x14, #0x1\n"
+    "sub x11, x11, x20\n"
+    "cbz x20, 15f\n"
+    "14:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z18.s }, p0/Z, [x13]\n"
+    ".inst 0xc13a1170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc1311190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    ".inst 0xc1331191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa4b  // bfcvt z11.h, p2/M, z18.s\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa6b  // bfcvtnt z11.h, p2/M, z19.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa2c  // bfcvtnt z12.h, p2/M, z17.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa4d  // bfcvtnt z13.h, p2/M, z18.s\n"
+    "mov x12, #0x0\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc0060c1c  // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa2f  // bfcvt z15.h, p2/M, z17.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1b7cb7c  // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z28.s }, p1, [x9]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "st1w { z29.s }, p1, [x28]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "st1w { z30.s }, p1, [x25]\n"
+    "add x8, x8, #0x1\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1301190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa2c  // bfcvt z12.h, p2/M, z17.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa6d  // bfcvt z13.h, p2/M, z19.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "subs x20, x20, #0x1\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z31.s }, p1, [x24]\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "add x28, x28, x26, LSL #2\n"
+    "add x25, x25, x23, LSL #2\n"
+    ".inst 0x648aaaab  // bfcvtnt z11.h, p2/M, z21.s\n"
+    ".inst 0x648aaa8c  // bfcvtnt z12.h, p2/M, z20.s\n"
+    "add x24, x24, x22, LSL #2\n"
+    ".inst 0x648aaa4d  // bfcvtnt z13.h, p2/M, z18.s\n"
+    ".inst 0x648aaa2e  // bfcvtnt z14.h, p2/M, z17.s\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "bgt 14b\n"
+    "15:"  // Main loop tail
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z17.s }, p0/Z, [x13]\n"
+    ".inst 0xc13a1170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc1311190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    ".inst 0xc1331191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa2b  // bfcvt z11.h, p2/M, z17.s\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa6b  // bfcvtnt z11.h, p2/M, z19.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0x648aaa4c  // bfcvtnt z12.h, p2/M, z18.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0x648aaa2d  // bfcvtnt z13.h, p2/M, z17.s\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc0060c1c  // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1321170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1b7cb7c  // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1w { z28.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z29.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "add x13, x13, %x[ld_in_col], LSL #2\n"
+    "st1w { z30.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    ".inst 0xc1301190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+    "st1w { z31.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "16:"  // Main loop skip tail
+    "cbz x14, 17f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x13]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x19, x13, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc13a1170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
+    "sub x11, x11, #0x1\n"
+    ".inst 0xc1311190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xc0060c1c  // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+    ".inst 0xc1b7cb7c  // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1w { z28.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc1331191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z29.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z30.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "st1w { z31.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "17:"  // Tail input: End
+    "cbz x11, 19f\n"
+    "18:"  // Right padding loop
+    ".inst 0xc0060c1c  // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "subs x11, x11, #0x1\n"
+    ".inst 0xc1b7cb7c  // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1w { z28.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "st1w { z29.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z30.s }, p1, [x25]\n"
+    "add x25, x25, x23, LSL #2\n"
+    "st1w { z31.s }, p1, [x24]\n"
+    "add x24, x24, x22, LSL #2\n"
+    "bgt 18b\n"
+    "19:"  // End
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x19, ALL, MUL #9\n"
+    "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x15\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x15, x16\n"
+    "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x13, x13, x19, LSL #2\n"
+    "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x10, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x0]\n"
+    "ldp x22, x21, [x10, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x10, #0x10]\n"
+    "b.any 1b\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..c2d439f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..17f2455
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,1151 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
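+  // A 5x5 kernel with stride 1 producing 4 output rows reads up to 4 + 5 - 1 = 8 input
+  // rows per step; rows not covered by the top padding or the valid input region are
+  // accounted for as bottom padding.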
+  Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x8\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x6\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ld1rw { z26.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x7\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z31.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x5\n"
+    "addvl SP, SP, #-30\n"
+    "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z24.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z24.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x20, x19\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "ld1w { z11.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "fmov z4.s, #0x0\n"
+    "incb x19\n"
+    "ld1w { z3.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aaa45  // bfcvt z5.h, p2/M, z18.s\n"
+    ".inst 0x658aa966  // bfcvt z6.h, p2/M, z11.s\n"
+    "ld1w { z17.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "addvl x23, SP, #30\n"
+    ".inst 0x648aaa44  // bfcvtnt z4.h, p2/M, z18.s\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x658aa867  // bfcvt z7.h, p2/M, z3.s\n"
+    "addvl x23, x23, #-6\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    ".inst 0x658aaa28  // bfcvt z8.h, p2/M, z17.s\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x23]\n"
+    ".inst 0x648aa965  // bfcvtnt z5.h, p2/M, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "fmov z4.s, #0x0\n"
+    "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+    ".inst 0x648aa866  // bfcvtnt z6.h, p2/M, z3.s\n"
+    "ld1w { z3.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aaa45  // bfcvt z5.h, p2/M, z18.s\n"
+    ".inst 0x648aaa27  // bfcvtnt z7.h, p2/M, z17.s\n"
+    "incb x19\n"
+    "ld1w { z17.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
+    ".inst 0x648aaa08  // bfcvtnt z8.h, p2/M, z16.s\n"
+    ".inst 0x658aaa09  // bfcvt z9.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    ".inst 0x658aa966  // bfcvt z6.h, p2/M, z11.s\n"
+    "mov x20, x19\n"
+    "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+    ".inst 0x648aaa44  // bfcvtnt z4.h, p2/M, z18.s\n"
+    ".inst 0x658aa867  // bfcvt z7.h, p2/M, z3.s\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+    ".inst 0x648aa965  // bfcvtnt z5.h, p2/M, z11.s\n"
+    ".inst 0x658aaa28  // bfcvt z8.h, p2/M, z17.s\n"
+    "incb x19\n"
+    "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+    "addvl x23, x23, #-6\n"
+    "ld1w { z11.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x23]\n"
+    "fmov z4.s, #0x0\n"
+    ".inst 0x648aa866  // bfcvtnt z6.h, p2/M, z3.s\n"
+    "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+    "ld1w { z3.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aaa45  // bfcvt z5.h, p2/M, z18.s\n"
+    "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
+    ".inst 0x648aaa27  // bfcvtnt z7.h, p2/M, z17.s\n"
+    "ld1w { z17.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa966  // bfcvt z6.h, p2/M, z11.s\n"
+    ".inst 0x648aaa08  // bfcvtnt z8.h, p2/M, z16.s\n"
+    "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    ".inst 0x658aaa09  // bfcvt z9.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x648aaa44  // bfcvtnt z4.h, p2/M, z18.s\n"
+    ".inst 0x658aa867  // bfcvt z7.h, p2/M, z3.s\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+    "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+    "addvl x23, x23, #-6\n"
+    ".inst 0x648aa965  // bfcvtnt z5.h, p2/M, z11.s\n"
+    ".inst 0x658aaa28  // bfcvt z8.h, p2/M, z17.s\n"
+    "ld1w { z11.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x23]\n"
+    ".inst 0x648aa866  // bfcvtnt z6.h, p2/M, z3.s\n"
+    "ld1w { z3.s }, p2/Z, [x20]\n"
+    "fmov z4.s, #0x0\n"
+    "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aaa45  // bfcvt z5.h, p2/M, z18.s\n"
+    "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
+    ".inst 0x648aaa27  // bfcvtnt z7.h, p2/M, z17.s\n"
+    "incb x19\n"
+    "ld1w { z17.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa966  // bfcvt z6.h, p2/M, z11.s\n"
+    "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+    ".inst 0x648aaa08  // bfcvtnt z8.h, p2/M, z16.s\n"
+    ".inst 0x658aaa09  // bfcvt z9.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    ".inst 0x658aa867  // bfcvt z7.h, p2/M, z3.s\n"
+    ".inst 0x648aaa44  // bfcvtnt z4.h, p2/M, z18.s\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x648aa965  // bfcvtnt z5.h, p2/M, z11.s\n"
+    "ld1w { z11.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+    ".inst 0x648aa866  // bfcvtnt z6.h, p2/M, z3.s\n"
+    ".inst 0x658aaa28  // bfcvt z8.h, p2/M, z17.s\n"
+    "ld1w { z3.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x648aaa27  // bfcvtnt z7.h, p2/M, z17.s\n"
+    "ld1w { z17.s }, p2/Z, [x20]\n"
+    "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+    "addvl x23, x23, #-6\n"
+    "st1h { z4.h }, p2, [x23]\n"
+    ".inst 0x648aaa08  // bfcvtnt z8.h, p2/M, z16.s\n"
+    "incb x20, ALL, MUL #5\n"
+    "fmov z4.s, #0x0\n"
+    "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+    ".inst 0x658aaa45  // bfcvt z5.h, p2/M, z18.s\n"
+    ".inst 0x658aaa09  // bfcvt z9.h, p2/M, z16.s\n"
+    "sub x19, x16, #0x1\n"
+    "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
+    ".inst 0x658aa966  // bfcvt z6.h, p2/M, z11.s\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "orr x22, x19, %x[ld_in_col], LSL #18\n"
+    "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+    ".inst 0x658aa867  // bfcvt z7.h, p2/M, z3.s\n"
+    "orr x22, x7, x22, LSL #20\n"
+    "mov x21, #0x8\n"
+    "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+    ".inst 0x658aaa28  // bfcvt z8.h, p2/M, z17.s\n"
+    "add x20, x5, x6\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+    "addvl x23, x23, #-6\n"
+    "mov z25.d, z24.d\n"
+    ".inst 0x648aaa44  // bfcvtnt z4.h, p2/M, z18.s\n"
+    "st1h { z4.h }, p2, [x23]\n"
+    ".inst 0x648aa965  // bfcvtnt z5.h, p2/M, z11.s\n"
+    ".inst 0x648aa866  // bfcvtnt z6.h, p2/M, z3.s\n"
+    "mov x11, #0x0\n"
+    "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+    ".inst 0x648aaa27  // bfcvtnt z7.h, p2/M, z17.s\n"
+    ".inst 0x648aaa08  // bfcvtnt z8.h, p2/M, z16.s\n"
+    "mov x8, #0x8\n"
+    "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
+    ".inst 0x658aaa09  // bfcvt z9.h, p2/M, z16.s\n"
+    "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x22, x22, #0x2\n"
+    "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x5, x15\n"
+    "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+    "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x15, x5, x19, x15\n"
+    ".inst 0xc0046b00  // mova za.d[x11, #0], { z24.d-z25.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046b01  // mova za.d[x11, #1], { z24.d-z25.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x13, x0, [x24], #0x10\n"
+    ".inst 0xc0046b02  // mova za.d[x11, #2], { z24.d-z25.d }\n"
+    "ldp x10, x9, [x19], #0x10\n"
+    ".inst 0xc0046b03  // mova za.d[x11, #3], { z24.d-z25.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0046b04  // mova za.d[x11, #4], { z24.d-z25.d }\n"
+    "ldp x28, x27, [x24], #0x10\n"
+    ".inst 0xc0046b05  // mova za.d[x11, #5], { z24.d-z25.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    ".inst 0xc0046b06  // mova za.d[x11, #6], { z24.d-z25.d }\n"
+    ".inst 0xc0046b07  // mova za.d[x11, #7], { z24.d-z25.d }\n"
+    ".inst 0xc0040b00  // mova za.d[x8, #0], { z24.d-z25.d }\n"
+    ".inst 0xc0040b01  // mova za.d[x8, #1], { z24.d-z25.d }\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0066800  // mova { z0.d-z1.d }, za.d[x11, #0]\n"
+    "sub x14, x14, x20\n"
+    ".inst 0xc0066822  // mova { z2.d-z3.d }, za.d[x11, #1]\n"
+    ".inst 0xc1bfcb40  // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z0.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z2.s }, p1, [x0]\n"
+    "add x0, x0, x9, LSL #2\n"
+    "st1w { z1.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "st1w { z3.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x5, x6\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 9f\n"
+    "cmp x21, #0x2\n"
+    "beq 8f\n"
+    "cmp x21, #0x3\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 4 priming loads
+    "add x20, x15, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "addvl x19, SP, #24\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    "7:"  // Unpadded: 3 priming loads
+    "add x21, x15, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "addvl x20, SP, #18\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "addvl x19, SP, #24\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x22, x15, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "addvl x21, SP, #12\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "addvl x20, SP, #18\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "addvl x19, SP, #24\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xa0402aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xa0412aa6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xa0422aa8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x23, x15, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "addvl x22, SP, #6\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "addvl x21, SP, #12\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "addvl x20, SP, #18\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "addvl x19, SP, #24\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xa0402ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xa0412ac6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    "ld1w { z16.s }, p1/Z, [x23]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412aa6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xa0422ac8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422aa8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1257196  // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247197  // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12771b6  // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b7  // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d6  // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d7  // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    ".inst 0xa0402be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be6  // ld1h { z6.h-z7.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422be8  // ld1h { z8.h-z9.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 20f\n"
+    "add x19, x15, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "sub x16, x16, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "sub x14, x14, #0x1\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "cmp x16, x14\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "csel x24, x16, x14, LT\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "sub x14, x14, x24\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    "cbz x24, 19f\n"
+    "11:"  // Unpadded: Main loop
+    "addvl x23, SP, #6\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "addvl x22, SP, #12\n"
+    "ld1w { z23.s }, p1/Z, [x15]\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402ae4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
+    "addvl x21, SP, #18\n"
+    "addvl x20, SP, #24\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    "add x19, x15, %x[ld_in_row], LSL #2\n"
+    "ld1w { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+    "subs x24, x24, #0x1\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412ae6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412ac6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422ae8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+    ".inst 0xc1257196  // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1247197  // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412aa6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422ac8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc12771b6  // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b7  // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422aa8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc12971d6  // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d7  // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1251190  // bfdot za.s[x8, 0], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1241191  // bfdot za.s[x8, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0x658aaaec  // bfcvt z12.h, p2/M, z23.s\n"
+    ".inst 0xa0402be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc12711b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0x648aaacc  // bfcvtnt z12.h, p2/M, z22.s\n"
+    ".inst 0xc12611b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0x658aaaad  // bfcvt z13.h, p2/M, z21.s\n"
+    ".inst 0xa0412be6  // ld1h { z6.h-z7.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xc12911d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0x648aaa8d  // bfcvtnt z13.h, p2/M, z20.s\n"
+    ".inst 0xc12811d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0x658aaa6e  // bfcvt z14.h, p2/M, z19.s\n"
+    ".inst 0x658aaa2f  // bfcvt z15.h, p2/M, z17.s\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc0066800  // mova { z0.d-z1.d }, za.d[x11, #0]\n"
+    ".inst 0xa0422be8  // ld1h { z8.h-z9.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    ".inst 0x648aaa4e  // bfcvtnt z14.h, p2/M, z18.s\n"
+    ".inst 0xc0066822  // mova { z2.d-z3.d }, za.d[x11, #1]\n"
+    ".inst 0xc1bfcb40  // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
+    "st1w { z0.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z2.s }, p1, [x0]\n"
+    "add x0, x0, x9, LSL #2\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0040b00  // mova za.d[x8, #0], { z24.d-z25.d }\n"
+    "st1w { z1.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040b01  // mova za.d[x8, #1], { z24.d-z25.d }\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    "st1w { z3.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "bgt 11b\n"
+    "b 19f\n"
+    "12:"  // Padded
+    "cbz x21, 17f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 16f\n"
+    "cmp x21, #0x2\n"
+    "beq 15f\n"
+    "cmp x21, #0x3\n"
+    "beq 14f\n"
+    "13:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x20, x15, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "addvl x19, SP, #24\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    "14:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x21, x15, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "addvl x20, SP, #18\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "addvl x19, SP, #24\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    "15:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x22, x15, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "addvl x21, SP, #12\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa0402aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    "addvl x20, SP, #18\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xa0412aa6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p0/Z, [x22]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xa0422aa8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    "16:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x23, x15, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "addvl x22, SP, #6\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa0402ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    "addvl x21, SP, #12\n"
+    "add x23, x23, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    ".inst 0xa0412ac6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    "addvl x19, SP, #24\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p0/Z, [x23]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412aa6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xa0422ac8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422aa8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1257196  // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247197  // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12771b6  // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b7  // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc12971d6  // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d7  // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
+    "17:"  // Padded: 0 priming loads
+    ".inst 0xa0402be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be6  // ld1h { z6.h-z7.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422be8  // ld1h { z8.h-z9.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 20f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x15]\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x19, x15, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "sub x16, x16, #0x1\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    "sub x14, x14, #0x1\n"
+    "cmp x16, x14\n"
+    "csel x24, x16, x14, LT\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    "sub x14, x14, x24\n"
+    "cbz x24, 19f\n"
+    "18:"  // Padded: Main loop
+    "addvl x23, SP, #6\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402ae4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z23.s }, p0/Z, [x15]\n"
+    "add x21, x15, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z22.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412ae6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+    "subs x24, x24, #0x1\n"
+    "add x15, x15, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z21.s }, p0/Z, [x21]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z20.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412ac6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    "ld1w { z19.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422ae8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+    ".inst 0xc1257196  // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
+    "ld1w { z18.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1247197  // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    "ld1w { z17.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422ac8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc12771b6  // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b7  // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12971d6  // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d7  // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1251190  // bfdot za.s[x8, 0], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1241191  // bfdot za.s[x8, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0x658aaaec  // bfcvt z12.h, p2/M, z23.s\n"
+    ".inst 0xa0402be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc12711b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0x648aaacc  // bfcvtnt z12.h, p2/M, z22.s\n"
+    ".inst 0xc12611b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0x658aaaad  // bfcvt z13.h, p2/M, z21.s\n"
+    ".inst 0xa0412be6  // ld1h { z6.h-z7.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xc12911d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0x648aaa8d  // bfcvtnt z13.h, p2/M, z20.s\n"
+    ".inst 0xc12811d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0x658aaa6e  // bfcvt z14.h, p2/M, z19.s\n"
+    ".inst 0x658aaa2f  // bfcvt z15.h, p2/M, z17.s\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc0066800  // mova { z0.d-z1.d }, za.d[x11, #0]\n"
+    ".inst 0xa0422be8  // ld1h { z8.h-z9.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    ".inst 0x648aaa4e  // bfcvtnt z14.h, p2/M, z18.s\n"
+    ".inst 0xc0066822  // mova { z2.d-z3.d }, za.d[x11, #1]\n"
+    ".inst 0xc1bfcb40  // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
+    "st1w { z0.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z2.s }, p1, [x0]\n"
+    "add x0, x0, x9, LSL #2\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0040b00  // mova za.d[x8, #0], { z24.d-z25.d }\n"
+    "st1w { z1.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040b01  // mova za.d[x8, #1], { z24.d-z25.d }\n"
+    ".inst 0x648aaa0f  // bfcvtnt z15.h, p2/M, z16.s\n"
+    "st1w { z3.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "bgt 18b\n"
+    "19:"  // Main loop tail
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1257190  // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+    "addvl x21, SP, #12\n"
+    ".inst 0xc1247191  // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xc1257192  // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247193  // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc12771b0  // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b1  // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412ac6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc1257194  // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247195  // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc12771b2  // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b3  // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412aa6  // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc12971d0  // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d1  // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422ac8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc1257196  // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1247197  // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xa0402a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc12771b4  // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b5  // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a86  // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc12971d2  // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d3  // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422aa8  // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc12771b6  // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12671b7  // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xa0412a66  // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc12971d4  // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d5  // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a88  // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc12971d6  // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12871d7  // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xa0422a68  // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1251190  // bfdot za.s[x8, 0], { z12.h-z13.h }, z5.h\n"
+    ".inst 0xc1241191  // bfdot za.s[x8, 1], { z12.h-z13.h }, z4.h\n"
+    ".inst 0xc12711b0  // bfdot za.s[x8, 0], { z13.h-z14.h }, z7.h\n"
+    ".inst 0xc12611b1  // bfdot za.s[x8, 1], { z13.h-z14.h }, z6.h\n"
+    ".inst 0xc12911d0  // bfdot za.s[x8, 0], { z14.h-z15.h }, z9.h\n"
+    ".inst 0xc12811d1  // bfdot za.s[x8, 1], { z14.h-z15.h }, z8.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc0066800  // mova { z0.d-z1.d }, za.d[x11, #0]\n"
+    ".inst 0xc0066822  // mova { z2.d-z3.d }, za.d[x11, #1]\n"
+    ".inst 0xc1bfcb40  // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
+    "st1w { z0.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z2.s }, p1, [x0]\n"
+    "add x0, x0, x9, LSL #2\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0040b00  // mova za.d[x8, #0], { z24.d-z25.d }\n"
+    "st1w { z1.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040b01  // mova za.d[x8, #1], { z24.d-z25.d }\n"
+    "st1w { z3.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "20:"  // Main loop skip tail
+    "cbz x14, 22f\n"
+    "21:"  // Right padding loop
+    ".inst 0xc0066800  // mova { z0.d-z1.d }, za.d[x11, #0]\n"
+    "add x8, x8, #0x2\n"
+    "subs x14, x14, #0x1\n"
+    ".inst 0xc0066822  // mova { z2.d-z3.d }, za.d[x11, #1]\n"
+    ".inst 0xc1bfcb40  // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
+    "st1w { z0.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z2.s }, p1, [x0]\n"
+    "add x0, x0, x9, LSL #2\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0040b00  // mova za.d[x8, #0], { z24.d-z25.d }\n"
+    "st1w { z1.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040b01  // mova za.d[x8, #1], { z24.d-z25.d }\n"
+    "st1w { z3.s }, p1, [x27]\n"
+    "add x27, x27, x25, LSL #2\n"
+    "bgt 21b\n"
+    "22:"  // End
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x19, ALL, MUL #16\n"
+    "incb x19, ALL, MUL #9\n"
+    "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x17\n"
+    "whilelt p1.s, x17, x7\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x15, x15, x19, LSL #2\n"
+    "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #30\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x0", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..c99cf51
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+);
+
+class sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za : public PlanarStrategy<float, float>
+{
+  using Parent = PlanarStrategy<float, float>;
+
+  public:
+  using return_type = float;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..eae8994
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,1246 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
+  const float *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const float *weights,
+  const float *bias,
+  float **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  float act_min,
+  float act_max
+)
+{
+  struct Args
+  {
+    const float *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const float *weights;
+    const float *bias;
+    long unsigned int input_cols, output_cols;
+    float **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+    float clamp_min, clamp_max;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
+
+  __asm__ __volatile__(
+    "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0xb\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x4\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p2.b\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ld1rw { z30.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+    "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x6\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z22.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+    "whilelt p8.s, XZR, x5\n"
+    "addvl SP, SP, #-15\n"
+    "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
+    "fmov z4.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z4.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x20, x19\n"
+    "ld1w { z31.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aabe1  // bfcvt z1.h, p2/M, z31.s\n"
+    "incb x19\n"
+    "ld1w { z13.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa9a9  // bfcvt z9.h, p2/M, z13.s\n"
+    "addvl x23, SP, #15\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x648aaa01  // bfcvtnt z1.h, p2/M, z16.s\n"
+    "addvl x23, x23, #-3\n"
+    "ld1w { z15.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    "st1h { z1.h }, p2, [x23]\n"
+    ".inst 0x648aaa49  // bfcvtnt z9.h, p2/M, z18.s\n"
+    "ld1w { z31.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aabe1  // bfcvt z1.h, p2/M, z31.s\n"
+    "incb x19\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+    ".inst 0x658aa9e2  // bfcvt z2.h, p2/M, z15.s\n"
+    "ld1w { z13.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa9a9  // bfcvt z9.h, p2/M, z13.s\n"
+    ".inst 0x648aaa01  // bfcvtnt z1.h, p2/M, z16.s\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+    "addvl x23, x23, #-3\n"
+    "ld1w { z15.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    "st1h { z1.h }, p2, [x23]\n"
+    ".inst 0x648aaa49  // bfcvtnt z9.h, p2/M, z18.s\n"
+    "ld1w { z31.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "incb x19\n"
+    ".inst 0x658aabe1  // bfcvt z1.h, p2/M, z31.s\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa9e2  // bfcvt z2.h, p2/M, z15.s\n"
+    "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+    "ld1w { z13.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa9a9  // bfcvt z9.h, p2/M, z13.s\n"
+    "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "addvl x23, x23, #-3\n"
+    ".inst 0x648aaa01  // bfcvtnt z1.h, p2/M, z16.s\n"
+    "ld1w { z15.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    "st1h { z1.h }, p2, [x23]\n"
+    ".inst 0x648aaa49  // bfcvtnt z9.h, p2/M, z18.s\n"
+    "ld1w { z31.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aabe1  // bfcvt z1.h, p2/M, z31.s\n"
+    "incb x19\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa9e2  // bfcvt z2.h, p2/M, z15.s\n"
+    "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+    "ld1w { z13.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x648aaa01  // bfcvtnt z1.h, p2/M, z16.s\n"
+    ".inst 0x658aa9a9  // bfcvt z9.h, p2/M, z13.s\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+    "ld1w { z15.s }, p2/Z, [x20]\n"
+    "mov x20, x19\n"
+    "addvl x23, x23, #-3\n"
+    "st1h { z1.h }, p2, [x23]\n"
+    "ld1w { z31.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x648aaa49  // bfcvtnt z9.h, p2/M, z18.s\n"
+    "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+    "ld1w { z16.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aabe1  // bfcvt z1.h, p2/M, z31.s\n"
+    ".inst 0x658aa9e2  // bfcvt z2.h, p2/M, z15.s\n"
+    "ld1w { z13.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    ".inst 0x658aa9a9  // bfcvt z9.h, p2/M, z13.s\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ld1w { z18.s }, p2/Z, [x20]\n"
+    "incb x20, ALL, MUL #5\n"
+    "sub x19, x17, #0x1\n"
+    "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+    "ld1w { z15.s }, p2/Z, [x20]\n"
+    "orr x22, x19, %x[ld_in_col], LSL #18\n"
+    "addvl x23, x23, #-3\n"
+    "mov z5.d, z4.d\n"
+    "orr x22, x6, x22, LSL #20\n"
+    "mov x21, #0xb\n"
+    "mov z6.d, z4.d\n"
+    "mov z7.d, z4.d\n"
+    "add x20, x5, x4\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    ".inst 0x648aaa01  // bfcvtnt z1.h, p2/M, z16.s\n"
+    "st1h { z1.h }, p2, [x23]\n"
+    ".inst 0x648aaa49  // bfcvtnt z9.h, p2/M, z18.s\n"
+    "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+    ".inst 0x658aa9e2  // bfcvt z2.h, p2/M, z15.s\n"
+    "mov x8, #0x0\n"
+    "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+    "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x22, x22, #0x2\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x5, x16\n"
+    "3:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col], LSL #2\n"
+    "bgt 3b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x2\n"
+    "msub x16, x5, x19, x16\n"
+    ".inst 0xc0040c80  // mova za.d[x8, #0], { z4.d-z7.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040c81  // mova za.d[x8, #1], { z4.d-z7.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x14, x13, [x24], #0x10\n"
+    ".inst 0xc0040c82  // mova za.d[x8, #2], { z4.d-z7.d }\n"
+    "ldp x11, x10, [x19], #0x10\n"
+    ".inst 0xc0040c83  // mova za.d[x8, #3], { z4.d-z7.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040c84  // mova za.d[x8, #4], { z4.d-z7.d }\n"
+    "ldp x9, x28, [x24], #0x10\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    "cbz x20, 5f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 5f\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "and x21, x20, #0x1\n"
+    "add x20, x20, #0x1\n"
+    ".inst 0xc1b6cbd8  // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
+    "lsr x20, x20, #0x1\n"
+    "sub x15, x15, x20\n"
+    "4:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1w { z24.s }, p1, [x14]\n"
+    "add x14, x14, x11, LSL #2\n"
+    "st1w { z25.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z26.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z27.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "bgt 4b\n"
+    "5:"  // Left padding: End
+    "adds XZR, x5, x4\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 9f\n"
+    "cmp x21, #0x2\n"
+    "beq 8f\n"
+    "cmp x21, #0x3\n"
+    "beq 7f\n"
+    "6:"  // Unpadded: 4 priming loads
+    "add x20, x16, %x[ld_in_row], LSL #2\n"
+    "ld1w { z23.s }, p1/Z, [x16]\n"
+    ".inst 0x658aaaea  // bfcvt z10.h, p2/M, z23.s\n"
+    "addvl x19, SP, #12\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "7:"  // Unpadded: 3 priming loads
+    "add x20, x16, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "addvl x19, SP, #9\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x21, x16, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "addvl x20, SP, #6\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "addvl x19, SP, #12\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x21, x16, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "addvl x20, SP, #3\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "addvl x19, SP, #9\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x21]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be1  // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 20f\n"
+    "add x20, x16, %x[ld_in_row], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "sub x17, x17, #0x2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "sub x15, x15, #0x1\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "lsr x19, x17, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    "cmp x19, x15\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "csel x25, x19, x15, LT\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "and x17, x17, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "sub x15, x15, x25\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "cbz x25, 19f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "addvl x24, SP, #6\n"
+    "addvl x23, SP, #12\n"
+    "ld1w { z18.s }, p1/Z, [x16]\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402b01  // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+    "add x22, x16, %x[ld_in_row], LSL #2\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402ae1  // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+    "addvl x20, SP, #9\n"
+    "add x19, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1311152  // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa4a  // bfcvt z10.h, p2/M, z18.s\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+    ".inst 0x648aaa2a  // bfcvtnt z10.h, p2/M, z17.s\n"
+    "subs x25, x25, #0x1\n"
+    "ld1w { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391172  // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    ".inst 0x648aaa2b  // bfcvtnt z11.h, p2/M, z17.s\n"
+    "ld1w { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xc1b6cbd8  // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
+    "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0xc1321192  // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x8, x8, #0x1\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x648aaa2c  // bfcvtnt z12.h, p2/M, z17.s\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "st1w { z24.s }, p1, [x14]\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x14, x14, x11, LSL #2\n"
+    ".inst 0xa1402aa1  // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "st1w { z25.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x22, x22, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "st1w { z26.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x22]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "ld1w { z16.s }, p1/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "st1w { z27.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc0040c84  // mova za.d[x8, #4], { z4.d-z7.d }\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1w { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1w { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa2d  // bfcvt z13.h, p2/M, z17.s\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa6c  // bfcvtnt z12.h, p2/M, z19.s\n"
+    ".inst 0x648aaa4d  // bfcvtnt z13.h, p2/M, z18.s\n"
+    "ld1w { z16.s }, p1/Z, [x19]\n"
+    ".inst 0x648aaa2e  // bfcvtnt z14.h, p2/M, z17.s\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xa1402be1  // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "bgt 11b\n"
+    "b 19f\n"
+    "12:"  // Padded
+    "cbz x21, 17f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 16f\n"
+    "cmp x21, #0x2\n"
+    "beq 15f\n"
+    "cmp x21, #0x3\n"
+    "beq 14f\n"
+    "13:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "add x20, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "addvl x19, SP, #12\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "14:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "add x20, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "addvl x19, SP, #9\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "15:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "add x21, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "addvl x20, SP, #6\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "addvl x19, SP, #12\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "16:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "add x21, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "addvl x20, SP, #3\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "addvl x19, SP, #9\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "17:"  // Padded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be1  // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 20f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "add x19, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "sub x17, x17, #0x2\n"
+    "sub x15, x15, #0x1\n"
+    "lsr x19, x17, #0x1\n"
+    "cmp x19, x15\n"
+    "csel x23, x19, x15, LT\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "and x17, x17, #0x1\n"
+    "sub x15, x15, x23\n"
+    "cbz x23, 19f\n"
+    "18:"  // Padded: Main loop
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "addvl x22, SP, #6\n"
+    "addvl x20, SP, #12\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402ac1  // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    "add x19, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    "addvl x21, SP, #3\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    ".inst 0xc1311152  // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "mov x12, #0x4\n"
+    "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0xc1391172  // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0x658aaa2b  // bfcvt z11.h, p2/M, z17.s\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x648aaa8a  // bfcvtnt z10.h, p2/M, z20.s\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1321192  // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "addvl x20, SP, #9\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x8\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa6b  // bfcvtnt z11.h, p2/M, z19.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa4c  // bfcvtnt z12.h, p2/M, z18.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0x648aaa2d  // bfcvtnt z13.h, p2/M, z17.s\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xa1402aa1  // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    "add x19, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z21.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x658aaa2f  // bfcvt z15.h, p2/M, z17.s\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "mov x12, #0x4\n"
+    "ld1w { z20.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa2b  // bfcvt z11.h, p2/M, z17.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "subs x23, x23, #0x1\n"
+    "ld1w { z19.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1b6cbd8  // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "st1w { z24.s }, p1, [x14]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z18.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    "st1w { z25.s }, p1, [x13]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0x658aaa2d  // bfcvt z13.h, p2/M, z17.s\n"
+    "ld1w { z17.s }, p0/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "ld1w { z16.s }, p0/Z, [x19]\n"
+    "add x14, x14, x11, LSL #2\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z26.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z27.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc0040c84  // mova za.d[x8, #4], { z4.d-z7.d }\n"
+    ".inst 0xa1402be1  // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+    ".inst 0x648aaaaa  // bfcvtnt z10.h, p2/M, z21.s\n"
+    ".inst 0x648aaa8b  // bfcvtnt z11.h, p2/M, z20.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+    ".inst 0x648aaa6c  // bfcvtnt z12.h, p2/M, z19.s\n"
+    ".inst 0x648aaa4d  // bfcvtnt z13.h, p2/M, z18.s\n"
+    ".inst 0x648aaa2e  // bfcvtnt z14.h, p2/M, z17.s\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "bgt 18b\n"
+    "19:"  // Main loop tail
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402ae1  // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    "add x21, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402ac1  // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #3\n"
+    "addvl x19, SP, #9\n"
+    ".inst 0xc1311152  // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z20.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z17.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "mov x12, #0x4\n"
+    "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0xc1391172  // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "ld1w { z19.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0x658aaa2b  // bfcvt z11.h, p2/M, z17.s\n"
+    "ld1w { z18.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x648aaa8a  // bfcvtnt z10.h, p2/M, z20.s\n"
+    "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0xc1321192  // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    "add x16, x16, %x[ld_in_col], LSL #2\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    "mov x12, #0x8\n"
+    "ld1w { z17.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa6b  // bfcvtnt z11.h, p2/M, z19.s\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa4c  // bfcvtnt z12.h, p2/M, z18.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0x648aaa2d  // bfcvtnt z13.h, p2/M, z17.s\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1b6cbd8  // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    "st1w { z24.s }, p1, [x14]\n"
+    "add x14, x14, x11, LSL #2\n"
+    ".inst 0xa1402be1  // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    "st1w { z25.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z26.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc0040c84  // mova za.d[x8, #4], { z4.d-z7.d }\n"
+    "st1w { z27.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "20:"  // Main loop skip tail
+    "cbz x17, 21f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x16]\n"
+    ".inst 0x658aaa0a  // bfcvt z10.h, p2/M, z16.s\n"
+    "add x21, x16, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0a  // bfcvtnt z10.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0b  // bfcvt z11.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0b  // bfcvtnt z11.h, p2/M, z16.s\n"
+    "mov x12, #0x4\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0c  // bfcvt z12.h, p2/M, z16.s\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x648aaa0c  // bfcvtnt z12.h, p2/M, z16.s\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x658aaa0d  // bfcvt z13.h, p2/M, z16.s\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x8\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0d  // bfcvtnt z13.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0e  // bfcvt z14.h, p2/M, z16.s\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x648aaa0e  // bfcvtnt z14.h, p2/M, z16.s\n"
+    ".inst 0xc1311150  // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+    "addvl x20, SP, #6\n"
+    "add x21, x21, %x[ld_in_row], LSL #2\n"
+    ".inst 0xc1391170  // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a81  // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "addvl x19, SP, #12\n"
+    ".inst 0xc1311151  // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+    "ld1w { z16.s }, p0/Z, [x21]\n"
+    ".inst 0x658aaa0f  // bfcvt z15.h, p2/M, z16.s\n"
+    "sub x15, x15, #0x1\n"
+    ".inst 0xc1391171  // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xa1402a61  // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1311152  // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
+    ".inst 0xc1321190  // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1391172  // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
+    ".inst 0xc1321191  // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+    "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    ".inst 0xc1b6cbd8  // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
+    "st1w { z24.s }, p1, [x14]\n"
+    "add x14, x14, x11, LSL #2\n"
+    ".inst 0xc1321192  // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
+    "add x8, x8, #0x1\n"
+    "st1w { z25.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z26.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    ".inst 0xc0040c84  // mova za.d[x8, #4], { z4.d-z7.d }\n"
+    "st1w { z27.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "21:"  // Tail input: End
+    "cbz x15, 23f\n"
+    "22:"  // Right padding loop
+    ".inst 0xc0060c18  // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "subs x15, x15, #0x1\n"
+    ".inst 0xc1b6cbd8  // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
+    "st1w { z24.s }, p1, [x14]\n"
+    "add x14, x14, x11, LSL #2\n"
+    ".inst 0xc0040c84  // mova za.d[x8, #4], { z4.d-z7.d }\n"
+    "st1w { z25.s }, p1, [x13]\n"
+    "add x13, x13, x10, LSL #2\n"
+    "st1w { z26.s }, p1, [x9]\n"
+    "add x9, x9, x27, LSL #2\n"
+    "st1w { z27.s }, p1, [x28]\n"
+    "add x28, x28, x26, LSL #2\n"
+    "bgt 22b\n"
+    "23:"  // End
+    "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "incb x19, ALL, MUL #16\n"
+    "incb x19, ALL, MUL #9\n"
+    "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x7\n"
+    "whilelt p1.s, x7, x6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x16, x16, x19, LSL #2\n"
+    "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20, LSL #2\n"
+    "add x21, x21, x19, LSL #2\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #15\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..be4f02f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_s8q_planar_3x3_s1_4rows_dot_za : public PlanarStrategy<int8_t, int8_t>
+{
+  using Parent = PlanarStrategy<int8_t, int8_t>;
+
+  public:
+  using return_type = int8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_s8q_planar_3x3_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_s8q_planar_3x3_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..7fee92b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const int8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    int8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x6\n"
+    "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z24.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x7\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x8\n"
+    "addvl SP, SP, #-12\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z24.h, p2/M, z24.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z22.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z8.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1sb { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "mov z20.h, #0x0\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "incw x21\n"
+    "ld1sb { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov x19, x21\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "ld1sb { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "ld1sb { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "addvl x20, SP, #12\n"
+    "incw x21\n"
+    "addvl x20, x20, #-4\n"
+    "mov x19, x21\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "ld1sb { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "ld1sb { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "addvl x20, x20, #-4\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "mov z9.d, z8.d\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "addvl x20, x20, #-4\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z10.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z11.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x17, x22, LSL #22\n"
+    "mov x21, #0x6\n"
+    "add x20, x8, x7\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x8, x14\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x14, x8, x19, x14\n"
+    ".inst 0xc0046900  // mova za.d[x11, #0], { z8.d-z9.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046901  // mova za.d[x11, #1], { z8.d-z9.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x10, x9, [x24], #0x10\n"
+    ".inst 0xc0046902  // mova za.d[x11, #2], { z8.d-z9.d }\n"
+    "ldp x28, x27, [x19], #0x10\n"
+    ".inst 0xc0046903  // mova za.d[x11, #3], { z8.d-z9.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    "ldp x26, x25, [x24], #0x10\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "ldp x24, x23, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "sub x13, x13, x20\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x8, x7\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #8\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "add z14.h, z14.h, z24.h\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x21, x14, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x14]\n"
+    "addvl x20, SP, #4\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #8\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "add z14.h, z14.h, z24.h\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "cbz x15, 18f\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x14]\n"
+    "sub x15, x15, #0x1\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "sub x13, x13, #0x1\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "cmp x15, x13\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "csel x22, x15, x13, LT\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z14.h, z14.h, z24.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "addvl x21, SP, #4\n"
+    "addvl x20, SP, #8\n"
+    "ld1sb { z21.s }, p1/Z, [x14]\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402aa0  // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    "ld1sb { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412aa2  // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "ld1sb { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    "ld1sb { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    "trn1 z13.h, z21.h, z20.h\n"
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "add z13.h, z13.h, z24.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "trn1 z14.h, z19.h, z18.h\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "add z14.h, z14.h, z24.h\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "add z15.h, z15.h, z24.h\n"
+    "bgt 11b\n"
+    "b 17f\n"
+    "12:"  // Padded
+    "cbz x21, 15f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 14f\n"
+    "13:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #8\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    "14:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #4\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #8\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    "15:"  // Padded: 0 priming loads
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "cbz x15, 18f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "sub x15, x15, #0x1\n"
+    "sub x13, x13, #0x1\n"
+    "cmp x15, x13\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "csel x22, x15, x13, LT\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "16:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z21.s }, p0/Z, [x14]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    "add z21.h, p0/M, z21.h, z24.h\n"
+    "add x21, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x21]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    "add z20.h, p0/M, z20.h, z24.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z19.s }, p0/Z, [x21]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x21]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    "mov x12, #0x4\n"
+    "addvl x20, SP, #4\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #8\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    "subs x22, x22, #0x1\n"
+    "ld1sb { z17.s }, p0/Z, [x21]\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    "ld1sb { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z21.h, z20.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "trn1 z14.h, z19.h, z18.h\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "bgt 16b\n"
+    "17:"  // Main loop tail
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "addvl x20, SP, #4\n"
+    "addvl x19, SP, #8\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "18:"  // Main loop skip tail
+    "cbz x13, 20f\n"
+    "19:"  // Right padding loop
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "subs x13, x13, #0x1\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 19b\n"
+    "20:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x16\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x14, x14, x19\n"
+    "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #12\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp
new file mode 100644
index 0000000..a9538ac
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_3x3_s2_2rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const int8_t *inptr;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    int8_t **outptrs;
+    const size_t *ld_out_cols;
+    long unsigned int n, n_channels;
+  };
+
+  Args args = { inptr, pad_top, 5u - std::min(5u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    "ldr x11, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x5\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x11\n"
+    "ldr x10, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p0.b\n"
+    "mov z12.s, #0x0\n"
+    "ldr x22, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p5.s, XZR, x22\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "whilelt p8.s, XZR, x10\n"
+    "eor p8.b, p0/Z, p8.b, p9.b\n"
+    "ldr x21, [%x[args], %[offsetof_Args_n]]\n"
+    "cbz x19, 1f\n"
+    "ld1w { z12.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "1:"  // Load bias: Done
+    "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+    "ld1sb { z27.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "mov z0.h, #0x0\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "mov z13.d, z12.d\n"
+    "ld1sb { z22.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z21.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z20.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z24.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z19.s }, p0/Z, [x20]\n"
+    "ld1rh { z28.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z27.h, z27.h, z28.h\n"
+    "sub z16.h, z16.h, z28.h\n"
+    "sub z22.h, z22.h, z28.h\n"
+    "sub z21.h, z21.h, z28.h\n"
+    "trn1 z8.h, z27.h, z21.h\n"
+    "sub z20.h, z20.h, z28.h\n"
+    "sub z18.h, z18.h, z28.h\n"
+    "trn1 z7.h, z16.h, z20.h\n"
+    "sub z17.h, z17.h, z28.h\n"
+    "sub z24.h, z24.h, z28.h\n"
+    "trn1 z6.h, z17.h, z0.h\n"
+    "sub z19.h, z19.h, z28.h\n"
+    "trn1 z5.h, z24.h, z0.h\n"
+    "trn1 z4.h, z22.h, z18.h\n"
+    "trn1 z3.h, z19.h, z0.h\n"
+    "ld1rh { z21.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "ld1rw { z2.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "ld1rw { z1.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "cbz x19, 2f\n"
+    "ld1w { z1.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "2:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "ld1rw { z0.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z0.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "3:"  // Load right_shift: End
+    "ldr x28, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "orr x21, x28, %x[ld_in_col], LSL #16\n"
+    "orr x21, x22, x21, LSL #22\n"
+    "ld1rw { z20.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ldr x27, [%x[args], %[offsetof_Args_inptr]]\n"
+    "mov x20, #0x5\n"
+    "add x19, x10, x11\n"
+    "ld1rw { z19.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "mov x9, #0x0\n"
+    "ldr x26, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x21, x21, #0x0\n"
+    "sub x20, x20, x19\n"
+    "mov x19, x27\n"
+    "4:"  // Issue prefetches
+    "subs x20, x20, #0x1\n"
+    ".inst 0xf8b54a7c  // rprfm pldstrm, x21, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 4b\n"
+    "ldr x21, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x27, x10, x19, x27\n"
+    ".inst 0xc0042980  // mova za.d[x9, #0], { z12.d-z13.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0042981  // mova za.d[x9, #1], { z12.d-z13.d }\n"
+    "mov x25, #0x2\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "ldp x24, x23, [x21], #0x10\n"
+    "ldp x22, x21, [x19], #0x10\n"
+    "cbz x20, 6f\n"
+    "cmp x20, x25\n"
+    "csel x19, x20, x25, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x25, x25, x19\n"
+    "cbz x20, 6f\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "and x25, x20, #0x1\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    "sub x26, x26, x20\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "5:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "bgt 5b\n"
+    "6:"  // Left padding: End
+    "adds XZR, x10, x11\n"
+    "bne 11f\n"
+    "cbz x25, 9f\n"
+    "cmp x25, #0x1\n"
+    "sub x28, x28, x25\n"
+    "beq 8f\n"
+    "7:"  // Unpadded: 2 priming loads
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1sb { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1sb { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1sb { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1sb { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "ld1sb { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16835c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xc16635e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+    "8:"  // Unpadded: 1 priming loads
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1sb { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1sb { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1sb { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1sb { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "ld1sb { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "9:"  // Unpadded: 0 priming loads
+    "add x20, x27, %x[ld_in_row]\n"
+    "ld1sb { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "sub x28, x28, #0x2\n"
+    "ld1sb { z18.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "sub x26, x26, #0x1\n"
+    "ld1sb { z15.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "lsr x19, x28, #0x1\n"
+    "ld1sb { z17.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "cmp x19, x26\n"
+    "ld1sb { z16.s }, p5/Z, [x20]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "csel x20, x19, x26, LT\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "and x28, x28, #0x1\n"
+    "sub x26, x26, x20\n"
+    "cbz x20, 16f\n"
+    "10:"  // Unpadded: Main loop
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "ld1sb { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1sb { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    "sub z18.h, z18.h, z21.h\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "ld1sb { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1sb { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1sb { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "add x9, x9, #0x1\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "ld1sb { z14.s }, p5/Z, [x27]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "ld1sb { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    "ld1sb { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "ld1sb { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "ld1sb { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "bgt 10b\n"
+    "b 16f\n"
+    "11:"  // Padded
+    "cbz x25, 14f\n"
+    "cmp x25, #0x1\n"
+    "sub x28, x28, x25\n"
+    "beq 13f\n"
+    "12:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16835c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc16635e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+    "13:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "14:"  // Padded: 0 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "sub x28, x28, #0x2\n"
+    "sub x26, x26, #0x1\n"
+    "lsr x19, x28, #0x1\n"
+    "mov z16.d, z16.d\n"
+    "cmp x19, x26\n"
+    "csel x20, x19, x26, LT\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "and x28, x28, #0x1\n"
+    "sub x26, x26, x20\n"
+    "cbz x20, 16f\n"
+    "15:"  // Padded: Main loop
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "add x9, x9, #0x1\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "bgt 15b\n"
+    "16:"  // Main loop tail
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "mov x12, #0x4\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add x9, x9, #0x1\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "cbz x28, 17f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "sub x26, x26, #0x1\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "add x9, x9, #0x1\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "17:"  // Tail input: End
+    "cbz x26, 19f\n"
+    "18:"  // Right padding loop
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "add x9, x9, #0x1\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "subs x26, x26, #0x1\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "bgt 18b\n"
+    "19:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_n] "I" (offsetof(Args, n)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..d14d662
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_s8q_planar_3x3_s2_4rows_dot_za : public PlanarStrategy<int8_t, int8_t>
+{
+  using Parent = PlanarStrategy<int8_t, int8_t>;
+
+  public:
+  using return_type = int8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_s8q_planar_3x3_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_s8q_planar_3x3_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..fd35da4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const int8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    int8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x9\n"
+    "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z5.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x7\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x8\n"
+    "addvl SP, SP, #-6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z5.h, p2/M, z5.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z0.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z0.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1rh { z13.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "incw x21\n"
+    "mov z17.h, #0x0\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "mov x19, x21\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "addvl x20, SP, #6\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "incw x21\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "addvl x20, x20, #-2\n"
+    "mov x19, x21\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "addvl x20, x20, #-2\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "mov z1.d, z0.d\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "addvl x20, x20, #-2\n"
+    "mov z2.d, z0.d\n"
+    "mov z3.d, z0.d\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z7.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x17, x22, LSL #22\n"
+    "mov x21, #0x9\n"
+    "add x20, x8, x7\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x8, x14\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x14, x8, x19, x14\n"
+    ".inst 0xc0046c00  // mova za.d[x11, #0], { z0.d-z3.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046c01  // mova za.d[x11, #1], { z0.d-z3.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x10, x9, [x24], #0x10\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "ldp x28, x27, [x19], #0x10\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    "ldp x26, x25, [x24], #0x10\n"
+    "ldp x24, x23, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "and x21, x20, #0x1\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "sub x13, x13, x20\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x8, x7\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1sb { z12.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #4\n"
+    "ld1sb { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1sb { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1sb { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1sb { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1sb { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1sb { z12.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #2\n"
+    "ld1sb { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1sb { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1sb { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1sb { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1sb { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    "cmp x15, #0x2\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "blt 18f\n"
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1sb { z12.s }, p1/Z, [x14]\n"
+    "sub x15, x15, #0x2\n"
+    "ld1sb { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "sub x13, x13, #0x1\n"
+    "ld1sb { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "lsr x19, x15, #0x1\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1sb { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "cmp x19, x13\n"
+    "ld1sb { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "csel x22, x19, x13, LT\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1sb { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1sb { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    "and x15, x15, #0x1\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "add x21, x14, %x[ld_in_row]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "addvl x20, SP, #2\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z12.s }, p1/Z, [x14]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "ld1sb { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "ld1sb { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z12.h, z12.h, z5.h\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "ld1sb { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1sb { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "ld1sb { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1sb { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "ld1sb { z12.s }, p1/Z, [x14]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "ld1sb { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "ld1sb { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "ld1sb { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "ld1sb { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "ld1sb { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x26, x26, x24\n"
+    "ld1sb { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "add z13.h, z13.h, z5.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "add z14.h, z14.h, z5.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "add z16.h, z16.h, z5.h\n"
+    "bgt 11b\n"
+    "b 17f\n"
+    "12:"  // Padded
+    "cbz x21, 15f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 14f\n"
+    "13:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "addvl x19, SP, #4\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "14:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "addvl x19, SP, #2\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "15:"  // Padded: 0 priming loads
+    "cmp x15, #0x2\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "blt 18f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "sub x15, x15, #0x2\n"
+    "sub x13, x13, #0x1\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "lsr x19, x15, #0x1\n"
+    "cmp x19, x13\n"
+    "mov z16.d, z16.d\n"
+    "csel x21, x19, x13, LT\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "and x15, x15, #0x1\n"
+    "sub x13, x13, x21\n"
+    "cbz x21, 17f\n"
+    "16:"  // Padded: Main loop
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x20, x14, %x[ld_in_row]\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "ld1sb { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x20]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "mov x12, #0x8\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "addvl x19, SP, #2\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "add x11, x11, #0x1\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "mov z16.d, z16.d\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "bgt 16b\n"
+    "17:"  // Main loop tail
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #2\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add x11, x11, #0x1\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "18:"  // Main loop skip tail
+    "cbz x15, 19f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "addvl x19, SP, #4\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "sub x13, x13, #0x1\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "19:"  // Tail input: End
+    "cbz x13, 21f\n"
+    "20:"  // Right padding loop
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "subs x13, x13, #0x1\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 20b\n"
+    "21:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x16\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x14, x14, x19\n"
+    "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #6\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..6f3290f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
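+// Descriptive note (added for readability): judging by the class name and the constants
+// below, this is the strategy wrapper for the SME2 planar, signed 8-bit quantized,
+// 5x5 stride-1 depthwise kernel that produces four output rows per pass.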
+class sme2_s8q_planar_5x5_s1_4rows_dot_za : public PlanarStrategy<int8_t, int8_t>
+{
+  using Parent = PlanarStrategy<int8_t, int8_t>;
+
+  public:
+  using return_type = int8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_s8q_planar_5x5_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_s8q_planar_5x5_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..722fd5e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const int8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    int8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
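+  // Note on the pad_bottom expression below: with 4 output rows and a 5x5 stride-1
+  // kernel, each pass consumes an 8-row input window, so pad_bottom is the part of
+  // that window not covered by pad_top plus the valid input rows.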
+  Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x8\n"
+    "ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z25.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x5\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x7\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x6\n"
+    "addvl SP, SP, #-30\n"
+    "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z25.h, p2/M, z25.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z6.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z6.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x21, x23\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "ld1rh { z12.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "mov z2.h, #0x0\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "incw x23\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "mov x21, x23\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "addvl x20, SP, #30\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "incw x23\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "addvl x20, x20, #-6\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "mov x21, x23\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "incw x23\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "addvl x20, x20, #-6\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "mov x21, x23\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "incw x23\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "addvl x20, x20, #-6\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "mov x21, x23\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "addvl x20, x20, #-6\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "mov z7.d, z6.d\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "addvl x20, x20, #-6\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z3.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z1.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x16, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x7, x22, LSL #22\n"
+    "mov x21, #0x8\n"
+    "add x20, x6, x5\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "mov x8, #0x8\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x6, x15\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x15, x6, x19, x15\n"
+    ".inst 0xc00468c0  // mova za.d[x11, #0], { z6.d-z7.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc00468c1  // mova za.d[x11, #1], { z6.d-z7.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x13, x4, [x24], #0x10\n"
+    ".inst 0xc00468c2  // mova za.d[x11, #2], { z6.d-z7.d }\n"
+    "ldp x10, x9, [x19], #0x10\n"
+    ".inst 0xc00468c3  // mova za.d[x11, #3], { z6.d-z7.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc00468c4  // mova za.d[x11, #4], { z6.d-z7.d }\n"
+    "ldp x28, x27, [x24], #0x10\n"
+    ".inst 0xc00468c5  // mova za.d[x11, #5], { z6.d-z7.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    ".inst 0xc00468c6  // mova za.d[x11, #6], { z6.d-z7.d }\n"
+    ".inst 0xc00468c7  // mova za.d[x11, #7], { z6.d-z7.d }\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "sub x14, x14, x20\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x6, x5\n"
+    "bne 14f\n"
+    "cbz x21, 12f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 11f\n"
+    "cmp x21, #0x2\n"
+    "beq 10f\n"
+    "cmp x21, #0x3\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 4 priming loads
+    "add x20, x15, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x15]\n"
+    "addvl x19, SP, #24\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z29.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z29.h, z16.h, z29.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    "9:"  // Unpadded: 3 priming loads
+    "add x21, x15, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x15]\n"
+    "addvl x20, SP, #18\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    "10:"  // Unpadded: 2 priming loads
+    "add x22, x15, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x15]\n"
+    "addvl x21, SP, #12\n"
+    "ld1sb { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "addvl x20, SP, #18\n"
+    "ld1sb { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    "ld1sb { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z16.s }, p1/Z, [x22]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    "11:"  // Unpadded: 1 priming loads
+    "add x23, x15, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x15]\n"
+    "addvl x22, SP, #6\n"
+    "ld1sb { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x21, SP, #12\n"
+    "ld1sb { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x20, SP, #18\n"
+    "ld1sb { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x23]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    "12:"  // Unpadded: 0 priming loads
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 22f\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x15]\n"
+    "sub x16, x16, #0x1\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "sub x14, x14, #0x1\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "cmp x16, x14\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "csel x24, x16, x14, LT\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z28.h, z28.h, z25.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub x14, x14, x24\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    "cbz x24, 21f\n"
+    "13:"  // Unpadded: Main loop
+    "addvl x23, SP, #6\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x22, SP, #12\n"
+    "ld1sb { z23.s }, p1/Z, [x15]\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ae0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+    "addvl x21, SP, #18\n"
+    "addvl x20, SP, #24\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    "ld1sb { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "subs x24, x24, #0x1\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    "ld1sb { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ae4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    "ld1sb { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    "ld1sb { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aea  // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    "trn1 z27.h, z23.h, z22.h\n"
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    "trn1 z28.h, z21.h, z20.h\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "add z29.h, z29.h, z25.h\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "add z30.h, z30.h, z25.h\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 13b\n"
+    "b 21f\n"
+    "14:"  // Padded
+    "cbz x21, 19f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 18f\n"
+    "cmp x21, #0x2\n"
+    "beq 17f\n"
+    "cmp x21, #0x3\n"
+    "beq 16f\n"
+    "15:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x20, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    "16:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #18\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #24\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    "17:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "addvl x21, SP, #12\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #18\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    "18:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "addvl x22, SP, #6\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x21, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "addvl x19, SP, #24\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    "19:"  // Padded: 0 priming loads
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 22f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "sub x16, x16, #0x1\n"
+    "sub x14, x14, #0x1\n"
+    "cmp x16, x14\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "csel x24, x16, x14, LT\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "sub x14, x14, x24\n"
+    "cbz x24, 21f\n"
+    "20:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z23.s }, p0/Z, [x15]\n"
+    "add z23.h, p0/M, z23.h, z25.h\n"
+    "add x23, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z22.s }, p0/Z, [x23]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "addvl x21, SP, #12\n"
+    "add z22.h, p0/M, z22.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    "ld1sb { z21.s }, p0/Z, [x23]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    "add z21.h, p0/M, z21.h, z25.h\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    "mov x12, #0x4\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z20.s }, p0/Z, [x23]\n"
+    "add z20.h, p0/M, z20.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "subs x24, x24, #0x1\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    "ld1sb { z19.s }, p0/Z, [x23]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    "ld1sb { z18.s }, p0/Z, [x23]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    "ld1sb { z17.s }, p0/Z, [x23]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    "ld1sb { z16.s }, p0/Z, [x23]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    "trn1 z27.h, z23.h, z22.h\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "trn1 z28.h, z21.h, z20.h\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 20b\n"
+    "21:"  // Main loop tail
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x21, SP, #12\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "22:"  // Main loop skip tail
+    "cbz x14, 24f\n"
+    "23:"  // Right padding loop
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "add x8, x8, #0x2\n"
+    "subs x14, x14, #0x1\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 23b\n"
+    "24:"  // End
+    "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x23, ALL, MUL #16\n"
+    "incw x23, ALL, MUL #9\n"
+    "str x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x17\n"
+    "whilelt p1.s, x17, x7\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x15, x15, x19\n"
+    "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #30\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..e7a781d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_s8q_planar_5x5_s2_4rows_dot_za : public PlanarStrategy<int8_t, int8_t>
+{
+  using Parent = PlanarStrategy<int8_t, int8_t>;
+
+  public:
+  using return_type = int8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_s8q_planar_5x5_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_s8q_planar_5x5_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..81829b5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
+  const int8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  int8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const int8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    int8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0xb\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z9.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x4\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x6\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x5\n"
+    "addvl SP, SP, #-15\n"
+    "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z9.h, p2/M, z9.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z28.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z28.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "incw x21\n"
+    "mov z14.h, #0x0\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "addvl x20, SP, #15\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "mov x19, x21\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "incw x21\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "addvl x20, x20, #-3\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "mov x19, x21\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "incw x21\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "addvl x20, x20, #-3\n"
+    "mov x19, x21\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "incw x21\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "addvl x20, x20, #-3\n"
+    "mov x19, x21\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "addvl x20, x20, #-3\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "mov z29.d, z28.d\n"
+    "mov z30.d, z28.d\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "mov z31.d, z28.d\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "addvl x20, x20, #-3\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z3.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z1.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x17, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x6, x22, LSL #22\n"
+    "mov x21, #0xb\n"
+    "add x20, x5, x4\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x8, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x5, x16\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x16, x5, x19, x16\n"
+    ".inst 0xc0040f80  // mova za.d[x8, #0], { z28.d-z31.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040f81  // mova za.d[x8, #1], { z28.d-z31.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x14, x13, [x24], #0x10\n"
+    ".inst 0xc0040f82  // mova za.d[x8, #2], { z28.d-z31.d }\n"
+    "ldp x11, x10, [x19], #0x10\n"
+    ".inst 0xc0040f83  // mova za.d[x8, #3], { z28.d-z31.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ldp x9, x28, [x24], #0x10\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "and x21, x20, #0x1\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    "sub x15, x15, x20\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x5, x4\n"
+    "bne 14f\n"
+    "cbz x21, 12f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 11f\n"
+    "cmp x21, #0x2\n"
+    "beq 10f\n"
+    "cmp x21, #0x3\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 4 priming loads
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    "addvl x19, SP, #12\n"
+    "ld1sb { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1sb { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "9:"  // Unpadded: 3 priming loads
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    "addvl x19, SP, #9\n"
+    "ld1sb { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1sb { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "10:"  // Unpadded: 2 priming loads
+    "add x21, x16, %x[ld_in_row]\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    "addvl x20, SP, #6\n"
+    "ld1sb { z21.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z12.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #12\n"
+    "ld1sb { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1sb { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1sb { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "11:"  // Unpadded: 1 priming loads
+    "add x21, x16, %x[ld_in_row]\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    "addvl x20, SP, #3\n"
+    "ld1sb { z21.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z12.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #9\n"
+    "ld1sb { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1sb { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1sb { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "12:"  // Unpadded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 22f\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    "sub x17, x17, #0x2\n"
+    "ld1sb { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "sub x15, x15, #0x1\n"
+    "ld1sb { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "lsr x19, x17, #0x1\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "cmp x19, x15\n"
+    "ld1sb { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "csel x25, x19, x15, LT\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1sb { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "and x17, x17, #0x1\n"
+    "ld1sb { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    "ld1sb { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    "sub x15, x15, x25\n"
+    "cbz x25, 21f\n"
+    "13:"  // Unpadded: Main loop
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x24, SP, #6\n"
+    "addvl x23, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402b02  // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+    "add x22, x16, %x[ld_in_row]\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "addvl x20, SP, #9\n"
+    "subs x25, x25, #0x1\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z21.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z12.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "ld1sb { z20.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z13.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "ld1sb { z19.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "ld1sb { z18.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "ld1sb { z17.s }, p1/Z, [x22]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402aa2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    "ld1sb { z16.s }, p1/Z, [x22]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "ld1sb { z11.s }, p1/Z, [x16]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "ld1sb { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "ld1sb { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "ld1sb { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add x28, x28, x26\n"
+    "ld1sb { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1sb { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1sb { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z13.h, z13.h, z9.h\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1sb { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1sb { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "ld1sb { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    "ld1sb { z16.s }, p1/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "bgt 13b\n"
+    "b 21f\n"
+    "14:"  // Padded
+    "cbz x21, 19f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 18f\n"
+    "cmp x21, #0x2\n"
+    "beq 17f\n"
+    "cmp x21, #0x3\n"
+    "beq 16f\n"
+    "15:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x20]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z12.s }, p0/Z, [x20]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x20]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x19, SP, #12\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "16:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x20]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z12.s }, p0/Z, [x20]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x20]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x19, SP, #9\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "17:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x20, SP, #6\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "18:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x20, SP, #3\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #9\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "19:"  // Padded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 22f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "sub x17, x17, #0x2\n"
+    "sub x15, x15, #0x1\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "lsr x19, x17, #0x1\n"
+    "cmp x19, x15\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "csel x24, x19, x15, LT\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "and x17, x17, #0x1\n"
+    "sub x15, x15, x24\n"
+    "cbz x24, 21f\n"
+    "20:"  // Padded: Main loop
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ac2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #9\n"
+    "subs x24, x24, #0x1\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402aa2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z12.s }, p0/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "mov x12, #0x4\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "bgt 20b\n"
+    "21:"  // Main loop tail
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add x21, x16, %x[ld_in_row]\n"
+    "addvl x20, SP, #3\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ac2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+    "addvl x19, SP, #9\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x21]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1sb { z12.s }, p0/Z, [x21]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1sb { z20.s }, p0/Z, [x21]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "ld1sb { z13.s }, p0/Z, [x21]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x21]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x21]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x21]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x21]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x21]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1sb { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "22:"  // Main loop skip tail
+    "cbz x17, 23f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1sb { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1sb { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1sb { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1sb { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1sb { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "addvl x20, SP, #6\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "addvl x19, SP, #12\n"
+    "sub x15, x15, #0x1\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "add x8, x8, #0x1\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "23:"  // Tail input: End
+    "cbz x15, 25f\n"
+    "24:"  // Right padding loop
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "subs x15, x15, #0x1\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "bgt 24b\n"
+    "25:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #16\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x7\n"
+    "whilelt p1.s, x7, x6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x16, x16, x19\n"
+    "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #15\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..875a9f8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8q_planar_3x3_s1_4rows_dot_za : public PlanarStrategy<uint8_t, uint8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, uint8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8q_planar_3x3_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8q_planar_3x3_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..d59879b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const uint8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x6\n"
+    "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z24.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x7\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x8\n"
+    "addvl SP, SP, #-12\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z24.h, p2/M, z24.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z22.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z8.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1b { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "mov z20.h, #0x0\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "incw x21\n"
+    "ld1b { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov x19, x21\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "ld1b { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "ld1b { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "addvl x20, SP, #12\n"
+    "incw x21\n"
+    "addvl x20, x20, #-4\n"
+    "mov x19, x21\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "ld1b { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "ld1b { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "addvl x20, x20, #-4\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "mov z9.d, z8.d\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "addvl x20, x20, #-4\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z10.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z11.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x17, x22, LSL #22\n"
+    "mov x21, #0x6\n"
+    "add x20, x8, x7\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x8, x14\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x14, x8, x19, x14\n"
+    ".inst 0xc0046900  // mova za.d[x11, #0], { z8.d-z9.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046901  // mova za.d[x11, #1], { z8.d-z9.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x10, x9, [x24], #0x10\n"
+    ".inst 0xc0046902  // mova za.d[x11, #2], { z8.d-z9.d }\n"
+    "ldp x28, x27, [x19], #0x10\n"
+    ".inst 0xc0046903  // mova za.d[x11, #3], { z8.d-z9.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    "ldp x26, x25, [x24], #0x10\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "ldp x24, x23, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "sub x13, x13, x20\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x8, x7\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #8\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "add z14.h, z14.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x21, x14, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x14]\n"
+    "addvl x20, SP, #4\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #8\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "add z14.h, z14.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "cbz x15, 18f\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x14]\n"
+    "sub x15, x15, #0x1\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "sub x13, x13, #0x1\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "cmp x15, x13\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "csel x22, x15, x13, LT\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z14.h, z14.h, z24.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "addvl x21, SP, #4\n"
+    "addvl x20, SP, #8\n"
+    "ld1b { z21.s }, p1/Z, [x14]\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402aa0  // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412aa2  // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    "trn1 z13.h, z21.h, z20.h\n"
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "add z13.h, z13.h, z24.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "trn1 z14.h, z19.h, z18.h\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "add z14.h, z14.h, z24.h\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "add z15.h, z15.h, z24.h\n"
+    "bgt 11b\n"
+    "b 17f\n"
+    "12:"  // Padded
+    "cbz x21, 15f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 14f\n"
+    "13:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #8\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    "14:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #4\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #8\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    "15:"  // Padded: 0 priming loads
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "cbz x15, 18f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "sub x15, x15, #0x1\n"
+    "sub x13, x13, #0x1\n"
+    "cmp x15, x13\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "csel x22, x15, x13, LT\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "16:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z21.s }, p0/Z, [x14]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    "add z21.h, p0/M, z21.h, z24.h\n"
+    "add x21, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x21]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    "add z20.h, p0/M, z20.h, z24.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z19.s }, p0/Z, [x21]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x21]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    "mov x12, #0x4\n"
+    "addvl x20, SP, #4\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #8\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    "subs x22, x22, #0x1\n"
+    "ld1b { z17.s }, p0/Z, [x21]\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    "ld1b { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z21.h, z20.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "trn1 z14.h, z19.h, z18.h\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "bgt 16b\n"
+    "17:"  // Main loop tail
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "addvl x20, SP, #4\n"
+    "addvl x19, SP, #8\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "18:"  // Main loop skip tail
+    "cbz x13, 20f\n"
+    "19:"  // Right padding loop
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "subs x13, x13, #0x1\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 19b\n"
+    "20:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x16\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x14, x14, x19\n"
+    "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #12\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
new file mode 100644
index 0000000..9a0840c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_3x3_s2_2rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const uint8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    long unsigned int n, n_channels;
+  };
+
+  Args args = { inptr, pad_top, 5u - std::min(5u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    "ldr x11, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x5\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x11\n"
+    "ldr x10, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p0.b\n"
+    "mov z12.s, #0x0\n"
+    "ldr x22, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p5.s, XZR, x22\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "whilelt p8.s, XZR, x10\n"
+    "eor p8.b, p0/Z, p8.b, p9.b\n"
+    "ldr x21, [%x[args], %[offsetof_Args_n]]\n"
+    "cbz x19, 1f\n"
+    "ld1w { z12.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "1:"  // Load bias: Done
+    "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+    "ld1b { z27.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "mov z0.h, #0x0\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "mov z13.d, z12.d\n"
+    "ld1b { z22.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1b { z21.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1b { z24.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "ld1rh { z28.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z27.h, z27.h, z28.h\n"
+    "sub z16.h, z16.h, z28.h\n"
+    "sub z22.h, z22.h, z28.h\n"
+    "sub z21.h, z21.h, z28.h\n"
+    "trn1 z8.h, z27.h, z21.h\n"
+    "sub z20.h, z20.h, z28.h\n"
+    "sub z18.h, z18.h, z28.h\n"
+    "trn1 z7.h, z16.h, z20.h\n"
+    "sub z17.h, z17.h, z28.h\n"
+    "sub z24.h, z24.h, z28.h\n"
+    "trn1 z6.h, z17.h, z0.h\n"
+    "sub z19.h, z19.h, z28.h\n"
+    "trn1 z5.h, z24.h, z0.h\n"
+    "trn1 z4.h, z22.h, z18.h\n"
+    "trn1 z3.h, z19.h, z0.h\n"
+    "ld1rh { z21.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "ld1rw { z2.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "ld1rw { z1.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "cbz x19, 2f\n"
+    "ld1w { z1.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "2:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "ld1rw { z0.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z0.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "3:"  // Load right_shift: End
+    "ldr x28, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "orr x21, x28, %x[ld_in_col], LSL #16\n"
+    "orr x21, x22, x21, LSL #22\n"
+    "ld1rw { z20.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ldr x27, [%x[args], %[offsetof_Args_inptr]]\n"
+    "mov x20, #0x5\n"
+    "add x19, x10, x11\n"
+    "ld1rw { z19.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "mov x9, #0x0\n"
+    "ldr x26, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x21, x21, #0x0\n"
+    "sub x20, x20, x19\n"
+    "mov x19, x27\n"
+    "4:"  // Issue prefetches
+    "subs x20, x20, #0x1\n"
+    ".inst 0xf8b54a7c  // rprfm pldstrm, x21, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 4b\n"
+    "ldr x21, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x27, x10, x19, x27\n"
+    ".inst 0xc0042980  // mova za.d[x9, #0], { z12.d-z13.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0042981  // mova za.d[x9, #1], { z12.d-z13.d }\n"
+    "mov x25, #0x2\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "ldp x24, x23, [x21], #0x10\n"
+    "ldp x22, x21, [x19], #0x10\n"
+    "cbz x20, 6f\n"
+    "cmp x20, x25\n"
+    "csel x19, x20, x25, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x25, x25, x19\n"
+    "cbz x20, 6f\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "and x25, x20, #0x1\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    "sub x26, x26, x20\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "5:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "bgt 5b\n"
+    "6:"  // Left padding: End
+    "adds XZR, x10, x11\n"
+    "bne 11f\n"
+    "cbz x25, 9f\n"
+    "cmp x25, #0x1\n"
+    "sub x28, x28, x25\n"
+    "beq 8f\n"
+    "7:"  // Unpadded: 2 priming loads
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16835c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xc16635e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+    "8:"  // Unpadded: 1 priming loads
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "9:"  // Unpadded: 0 priming loads
+    "add x20, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "sub x28, x28, #0x2\n"
+    "ld1b { z18.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "sub x26, x26, #0x1\n"
+    "ld1b { z15.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "lsr x19, x28, #0x1\n"
+    "ld1b { z17.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "cmp x19, x26\n"
+    "ld1b { z16.s }, p5/Z, [x20]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "csel x20, x19, x26, LT\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "and x28, x28, #0x1\n"
+    "sub x26, x26, x20\n"
+    "cbz x20, 16f\n"
+    "10:"  // Unpadded: Main loop
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    "sub z18.h, z18.h, z21.h\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "add x9, x9, #0x1\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "bgt 10b\n"
+    "b 16f\n"
+    "11:"  // Padded
+    "cbz x25, 14f\n"
+    "cmp x25, #0x1\n"
+    "sub x28, x28, x25\n"
+    "beq 13f\n"
+    "12:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16835c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc16635e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+    "13:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "14:"  // Padded: 0 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "sub x28, x28, #0x2\n"
+    "sub x26, x26, #0x1\n"
+    "lsr x19, x28, #0x1\n"
+    "mov z16.d, z16.d\n"
+    "cmp x19, x26\n"
+    "csel x20, x19, x26, LT\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "and x28, x28, #0x1\n"
+    "sub x26, x26, x20\n"
+    "cbz x20, 16f\n"
+    "15:"  // Padded: Main loop
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "add x9, x9, #0x1\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "bgt 15b\n"
+    "16:"  // Main loop tail
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "mov x12, #0x4\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add x9, x9, #0x1\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "cbz x28, 17f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "sub x26, x26, #0x1\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "add x9, x9, #0x1\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "17:"  // Tail input: End
+    "cbz x26, 19f\n"
+    "18:"  // Right padding loop
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "add x9, x9, #0x1\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "subs x26, x26, #0x1\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "bgt 18b\n"
+    "19:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_n] "I" (offsetof(Args, n)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..b878914
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8q_planar_3x3_s2_4rows_dot_za : public PlanarStrategy<uint8_t, uint8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, uint8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8q_planar_3x3_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8q_planar_3x3_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..bdf1ba6
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const uint8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x9\n"
+    "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z5.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x7\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x8\n"
+    "addvl SP, SP, #-6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z5.h, p2/M, z5.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z0.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z0.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1rh { z13.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "incw x21\n"
+    "mov z17.h, #0x0\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "mov x19, x21\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "addvl x20, SP, #6\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "incw x21\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "addvl x20, x20, #-2\n"
+    "mov x19, x21\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "addvl x20, x20, #-2\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "mov z1.d, z0.d\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "addvl x20, x20, #-2\n"
+    "mov z2.d, z0.d\n"
+    "mov z3.d, z0.d\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z7.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x17, x22, LSL #22\n"
+    "mov x21, #0x9\n"
+    "add x20, x8, x7\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x8, x14\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x14, x8, x19, x14\n"
+    ".inst 0xc0046c00  // mova za.d[x11, #0], { z0.d-z3.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046c01  // mova za.d[x11, #1], { z0.d-z3.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x10, x9, [x24], #0x10\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "ldp x28, x27, [x19], #0x10\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    "ldp x26, x25, [x24], #0x10\n"
+    "ldp x24, x23, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "and x21, x20, #0x1\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "sub x13, x13, x20\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x8, x7\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #4\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #2\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    "cmp x15, #0x2\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "blt 18f\n"
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "sub x15, x15, #0x2\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "sub x13, x13, #0x1\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "lsr x19, x15, #0x1\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "cmp x19, x13\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "csel x22, x19, x13, LT\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    "and x15, x15, #0x1\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "add x21, x14, %x[ld_in_row]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "addvl x20, SP, #2\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "ld1b { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "ld1b { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z12.h, z12.h, z5.h\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "ld1b { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "ld1b { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "ld1b { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "ld1b { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x26, x26, x24\n"
+    "ld1b { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "add z13.h, z13.h, z5.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "add z14.h, z14.h, z5.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "add z16.h, z16.h, z5.h\n"
+    "bgt 11b\n"
+    "b 17f\n"
+    "12:"  // Padded
+    "cbz x21, 15f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 14f\n"
+    "13:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "addvl x19, SP, #4\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "14:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "addvl x19, SP, #2\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "15:"  // Padded: 0 priming loads
+    "cmp x15, #0x2\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "blt 18f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "sub x15, x15, #0x2\n"
+    "sub x13, x13, #0x1\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "lsr x19, x15, #0x1\n"
+    "cmp x19, x13\n"
+    "mov z16.d, z16.d\n"
+    "csel x21, x19, x13, LT\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "and x15, x15, #0x1\n"
+    "sub x13, x13, x21\n"
+    "cbz x21, 17f\n"
+    "16:"  // Padded: Main loop
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x20, x14, %x[ld_in_row]\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "ld1b { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "mov x12, #0x8\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "addvl x19, SP, #2\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "add x11, x11, #0x1\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "mov z16.d, z16.d\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "bgt 16b\n"
+    "17:"  // Main loop tail
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #2\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add x11, x11, #0x1\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "18:"  // Main loop skip tail
+    "cbz x15, 19f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "addvl x19, SP, #4\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "sub x13, x13, #0x1\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "19:"  // Tail input: End
+    "cbz x13, 21f\n"
+    "20:"  // Right padding loop
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "subs x13, x13, #0x1\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 20b\n"
+    "21:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x16\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x14, x14, x19\n"
+    "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #6\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..db0750e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8q_planar_5x5_s1_4rows_dot_za : public PlanarStrategy<uint8_t, uint8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, uint8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8q_planar_5x5_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8q_planar_5x5_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..4678e82
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const uint8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x8\n"
+    "ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z25.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x5\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x7\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x6\n"
+    "addvl SP, SP, #-30\n"
+    "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z25.h, p2/M, z25.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z6.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z6.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x21, x23\n"
+    "ld1b { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "ld1rh { z12.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "mov z2.h, #0x0\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "incw x23\n"
+    "ld1b { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "ld1b { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1b { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1b { z15.s }, p2/Z, [x21]\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "mov x21, x23\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1b { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "ld1b { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1b { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "addvl x20, SP, #30\n"
+    "ld1b { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "incw x23\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "ld1b { z15.s }, p2/Z, [x21]\n"
+    "addvl x20, x20, #-6\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "mov x21, x23\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "incw x23\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1b { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1b { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1b { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1b { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "addvl x20, x20, #-6\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1b { z15.s }, p2/Z, [x21]\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "mov x21, x23\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "incw x23\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1b { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1b { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1b { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1b { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1b { z15.s }, p2/Z, [x21]\n"
+    "addvl x20, x20, #-6\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "mov x21, x23\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1b { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1b { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1b { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1b { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "ld1b { z15.s }, p2/Z, [x21]\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "addvl x20, x20, #-6\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "mov z7.d, z6.d\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "addvl x20, x20, #-6\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z3.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z1.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x16, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x7, x22, LSL #22\n"
+    "mov x21, #0x8\n"
+    "add x20, x6, x5\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "mov x8, #0x8\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x6, x15\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x15, x6, x19, x15\n"
+    ".inst 0xc00468c0  // mova za.d[x11, #0], { z6.d-z7.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc00468c1  // mova za.d[x11, #1], { z6.d-z7.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x13, x4, [x24], #0x10\n"
+    ".inst 0xc00468c2  // mova za.d[x11, #2], { z6.d-z7.d }\n"
+    "ldp x10, x9, [x19], #0x10\n"
+    ".inst 0xc00468c3  // mova za.d[x11, #3], { z6.d-z7.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc00468c4  // mova za.d[x11, #4], { z6.d-z7.d }\n"
+    "ldp x28, x27, [x24], #0x10\n"
+    ".inst 0xc00468c5  // mova za.d[x11, #5], { z6.d-z7.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    ".inst 0xc00468c6  // mova za.d[x11, #6], { z6.d-z7.d }\n"
+    ".inst 0xc00468c7  // mova za.d[x11, #7], { z6.d-z7.d }\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "sub x14, x14, x20\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x6, x5\n"
+    "bne 14f\n"
+    "cbz x21, 12f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 11f\n"
+    "cmp x21, #0x2\n"
+    "beq 10f\n"
+    "cmp x21, #0x3\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 4 priming loads
+    "add x20, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z29.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z29.h, z16.h, z29.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    "9:"  // Unpadded: 3 priming loads
+    "add x21, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x20, SP, #18\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    "10:"  // Unpadded: 2 priming loads
+    "add x22, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x21, SP, #12\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "addvl x20, SP, #18\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    "11:"  // Unpadded: 1 priming loads
+    "add x23, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x22, SP, #6\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x21, SP, #12\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x20, SP, #18\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    "12:"  // Unpadded: 0 priming loads
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 22f\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "sub x16, x16, #0x1\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "sub x14, x14, #0x1\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "cmp x16, x14\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "csel x24, x16, x14, LT\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z28.h, z28.h, z25.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub x14, x14, x24\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    "cbz x24, 21f\n"
+    "13:"  // Unpadded: Main loop
+    "addvl x23, SP, #6\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x22, SP, #12\n"
+    "ld1b { z23.s }, p1/Z, [x15]\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ae0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+    "addvl x21, SP, #18\n"
+    "addvl x20, SP, #24\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    "ld1b { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "subs x24, x24, #0x1\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ae4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aea  // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    "trn1 z27.h, z23.h, z22.h\n"
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    "trn1 z28.h, z21.h, z20.h\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "add z29.h, z29.h, z25.h\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "add z30.h, z30.h, z25.h\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 13b\n"
+    "b 21f\n"
+    "14:"  // Padded
+    "cbz x21, 19f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 18f\n"
+    "cmp x21, #0x2\n"
+    "beq 17f\n"
+    "cmp x21, #0x3\n"
+    "beq 16f\n"
+    "15:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x20, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    "16:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #18\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #24\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    "17:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "addvl x21, SP, #12\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #18\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    "18:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "addvl x22, SP, #6\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x21, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "addvl x19, SP, #24\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    "19:"  // Padded: 0 priming loads
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 22f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "sub x16, x16, #0x1\n"
+    "sub x14, x14, #0x1\n"
+    "cmp x16, x14\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "csel x24, x16, x14, LT\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "sub x14, x14, x24\n"
+    "cbz x24, 21f\n"
+    "20:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z23.s }, p0/Z, [x15]\n"
+    "add z23.h, p0/M, z23.h, z25.h\n"
+    "add x23, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z22.s }, p0/Z, [x23]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "addvl x21, SP, #12\n"
+    "add z22.h, p0/M, z22.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z21.s }, p0/Z, [x23]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    "add z21.h, p0/M, z21.h, z25.h\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    "mov x12, #0x4\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z20.s }, p0/Z, [x23]\n"
+    "add z20.h, p0/M, z20.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "subs x24, x24, #0x1\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z19.s }, p0/Z, [x23]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    "ld1b { z18.s }, p0/Z, [x23]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z17.s }, p0/Z, [x23]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z16.s }, p0/Z, [x23]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    "trn1 z27.h, z23.h, z22.h\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "trn1 z28.h, z21.h, z20.h\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 20b\n"
+    "21:"  // Main loop tail
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x21, SP, #12\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "22:"  // Main loop skip tail
+    "cbz x14, 24f\n"
+    "23:"  // Right padding loop
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "add x8, x8, #0x2\n"
+    "subs x14, x14, #0x1\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 23b\n"
+    "24:"  // End
+    "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x23, ALL, MUL #16\n"
+    "incw x23, ALL, MUL #9\n"
+    "str x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x17\n"
+    "whilelt p1.s, x17, x7\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x15, x15, x19\n"
+    "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #30\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..9fa295b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8q_planar_5x5_s2_4rows_dot_za : public PlanarStrategy<uint8_t, uint8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, uint8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8q_planar_5x5_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8q_planar_5x5_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..84e8c8b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const uint8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const uint8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0xb\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z9.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x4\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x6\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x5\n"
+    "addvl SP, SP, #-15\n"
+    "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z9.h, p2/M, z9.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z28.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z28.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1b { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "incw x21\n"
+    "mov z14.h, #0x0\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "addvl x20, SP, #15\n"
+    "ld1b { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "mov x19, x21\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "incw x21\n"
+    "ld1b { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "addvl x20, x20, #-3\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "ld1b { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "mov x19, x21\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ld1b { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "incw x21\n"
+    "ld1b { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "addvl x20, x20, #-3\n"
+    "mov x19, x21\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1b { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "ld1b { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "incw x21\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "addvl x20, x20, #-3\n"
+    "mov x19, x21\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1b { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "ld1b { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "ld1b { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "ld1b { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "ld1b { z16.s }, p2/Z, [x19]\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "addvl x20, x20, #-3\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "mov z29.d, z28.d\n"
+    "mov z30.d, z28.d\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "mov z31.d, z28.d\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "addvl x20, x20, #-3\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z3.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z1.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x17, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x6, x22, LSL #22\n"
+    "mov x21, #0xb\n"
+    "add x20, x5, x4\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x8, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x5, x16\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x16, x5, x19, x16\n"
+    ".inst 0xc0040f80  // mova za.d[x8, #0], { z28.d-z31.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040f81  // mova za.d[x8, #1], { z28.d-z31.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x14, x13, [x24], #0x10\n"
+    ".inst 0xc0040f82  // mova za.d[x8, #2], { z28.d-z31.d }\n"
+    "ldp x11, x10, [x19], #0x10\n"
+    ".inst 0xc0040f83  // mova za.d[x8, #3], { z28.d-z31.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ldp x9, x28, [x24], #0x10\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "and x21, x20, #0x1\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    "sub x15, x15, x20\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x5, x4\n"
+    "bne 14f\n"
+    "cbz x21, 12f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 11f\n"
+    "cmp x21, #0x2\n"
+    "beq 10f\n"
+    "cmp x21, #0x3\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 4 priming loads
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x19, SP, #12\n"
+    "ld1b { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "9:"  // Unpadded: 3 priming loads
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x19, SP, #9\n"
+    "ld1b { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "10:"  // Unpadded: 2 priming loads
+    "add x21, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x20, SP, #6\n"
+    "ld1b { z21.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #12\n"
+    "ld1b { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "11:"  // Unpadded: 1 priming loads
+    "add x21, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x20, SP, #3\n"
+    "ld1b { z21.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #9\n"
+    "ld1b { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "12:"  // Unpadded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 22f\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "sub x17, x17, #0x2\n"
+    "ld1b { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "sub x15, x15, #0x1\n"
+    "ld1b { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "lsr x19, x17, #0x1\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "cmp x19, x15\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "csel x25, x19, x15, LT\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "and x17, x17, #0x1\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    "sub x15, x15, x25\n"
+    "cbz x25, 21f\n"
+    "13:"  // Unpadded: Main loop
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x24, SP, #6\n"
+    "addvl x23, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402b02  // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+    "add x22, x16, %x[ld_in_row]\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "addvl x20, SP, #9\n"
+    "subs x25, x25, #0x1\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z21.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "ld1b { z20.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "ld1b { z19.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "ld1b { z18.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402aa2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "ld1b { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "ld1b { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add x28, x28, x26\n"
+    "ld1b { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z13.h, z13.h, z9.h\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "bgt 13b\n"
+    "b 21f\n"
+    "14:"  // Padded
+    "cbz x21, 19f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 18f\n"
+    "cmp x21, #0x2\n"
+    "beq 17f\n"
+    "cmp x21, #0x3\n"
+    "beq 16f\n"
+    "15:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x20]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x20]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x19, SP, #12\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "16:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x20]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x20]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x19, SP, #9\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "17:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x20, SP, #6\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "18:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x20, SP, #3\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #9\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "19:"  // Padded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 22f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "sub x17, x17, #0x2\n"
+    "sub x15, x15, #0x1\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "lsr x19, x17, #0x1\n"
+    "cmp x19, x15\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "csel x24, x19, x15, LT\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "and x17, x17, #0x1\n"
+    "sub x15, x15, x24\n"
+    "cbz x24, 21f\n"
+    "20:"  // Padded: Main loop
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ac2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #9\n"
+    "subs x24, x24, #0x1\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402aa2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "mov x12, #0x4\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "bgt 20b\n"
+    "21:"  // Main loop tail
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add x21, x16, %x[ld_in_row]\n"
+    "addvl x20, SP, #3\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ac2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+    "addvl x19, SP, #9\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x21]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x21]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1b { z20.s }, p0/Z, [x21]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "ld1b { z13.s }, p0/Z, [x21]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x21]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x21]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x21]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x21]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x21]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "22:"  // Main loop skip tail
+    "cbz x17, 23f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "addvl x20, SP, #6\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "addvl x19, SP, #12\n"
+    "sub x15, x15, #0x1\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "add x8, x8, #0x1\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "23:"  // Tail input: End
+    "cbz x15, 25f\n"
+    "24:"  // Right padding loop
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "subs x15, x15, #0x1\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "bgt 24b\n"
+    "25:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #16\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x7\n"
+    "whilelt p1.s, x7, x6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x16, x16, x19\n"
+    "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #15\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..de574ff
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za : public PlanarStrategy<uint8_t, int8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, int8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..ad765ba
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x6\n"
+    "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z24.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x7\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x8\n"
+    "addvl SP, SP, #-12\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z24.h, p2/M, z24.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z22.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z8.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1sb { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "mov z20.h, #0x0\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "incw x21\n"
+    "ld1sb { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov x19, x21\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "ld1sb { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "ld1sb { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "addvl x20, SP, #12\n"
+    "incw x21\n"
+    "addvl x20, x20, #-4\n"
+    "mov x19, x21\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "ld1sb { z27.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "ld1sb { z23.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "sub z27.h, z27.h, z21.h\n"
+    "sub z23.h, z23.h, z21.h\n"
+    "addvl x20, x20, #-4\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "mov z9.d, z8.d\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z0.h, z20.h, z27.h\n"
+    "trn1 z1.h, z27.h, z23.h\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "addvl x20, x20, #-4\n"
+    "trn1 z2.h, z23.h, z16.h\n"
+    "trn1 z3.h, z16.h, z20.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+    "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z10.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z11.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x17, x22, LSL #22\n"
+    "mov x21, #0x6\n"
+    "add x20, x8, x7\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x8, x14\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x14, x8, x19, x14\n"
+    ".inst 0xc0046900  // mova za.d[x11, #0], { z8.d-z9.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046901  // mova za.d[x11, #1], { z8.d-z9.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x10, x9, [x24], #0x10\n"
+    ".inst 0xc0046902  // mova za.d[x11, #2], { z8.d-z9.d }\n"
+    "ldp x28, x27, [x19], #0x10\n"
+    ".inst 0xc0046903  // mova za.d[x11, #3], { z8.d-z9.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    "ldp x26, x25, [x24], #0x10\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "ldp x24, x23, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "sub x13, x13, x20\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x8, x7\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #8\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "add z14.h, z14.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x21, x14, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x14]\n"
+    "addvl x20, SP, #4\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #8\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "add z14.h, z14.h, z24.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "cbz x15, 18f\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x14]\n"
+    "sub x15, x15, #0x1\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z17.h, z16.h\n"
+    "sub x13, x13, #0x1\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "cmp x15, x13\n"
+    "add z13.h, z13.h, z24.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    "csel x22, x15, x13, LT\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z14.h, z14.h, z24.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add z15.h, z15.h, z24.h\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "addvl x21, SP, #4\n"
+    "addvl x20, SP, #8\n"
+    "ld1b { z21.s }, p1/Z, [x14]\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402aa0  // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412aa2  // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    "trn1 z13.h, z21.h, z20.h\n"
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "add z13.h, z13.h, z24.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "trn1 z14.h, z19.h, z18.h\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "add z14.h, z14.h, z24.h\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "add z15.h, z15.h, z24.h\n"
+    "bgt 11b\n"
+    "b 17f\n"
+    "12:"  // Padded
+    "cbz x21, 15f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 14f\n"
+    "13:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #8\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    "14:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #4\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #8\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    "15:"  // Padded: 0 priming loads
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "cbz x15, 18f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x14]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z19.h, z18.h\n"
+    "trn1 z14.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "sub x15, x15, #0x1\n"
+    "sub x13, x13, #0x1\n"
+    "cmp x15, x13\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "csel x22, x15, x13, LT\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "16:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z21.s }, p0/Z, [x14]\n"
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    "add z21.h, p0/M, z21.h, z24.h\n"
+    "add x21, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x21]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    "add z20.h, p0/M, z20.h, z24.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z19.s }, p0/Z, [x21]\n"
+    "add z19.h, p0/M, z19.h, z24.h\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x21]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    "mov x12, #0x4\n"
+    "addvl x20, SP, #4\n"
+    "add z18.h, p0/M, z18.h, z24.h\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #8\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    "subs x22, x22, #0x1\n"
+    "ld1b { z17.s }, p0/Z, [x21]\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z24.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    "ld1b { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "add z16.h, p0/M, z16.h, z24.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xa0402be0  // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z21.h, z20.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xa0412be2  // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "trn1 z14.h, z19.h, z18.h\n"
+    "trn1 z15.h, z17.h, z16.h\n"
+    "bgt 16b\n"
+    "17:"  // Main loop tail
+    ".inst 0xc16175a8  // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+    "addvl x20, SP, #4\n"
+    "addvl x19, SP, #8\n"
+    ".inst 0xc16075a9  // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a80  // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc16375c8  // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275c9  // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a82  // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    ".inst 0xc16175aa  // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc16075ab  // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+    ".inst 0xa0402a60  // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc16175ac  // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc16075ad  // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc16375ca  // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    ".inst 0xc16275cb  // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+    ".inst 0xa0412a62  // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    ".inst 0xc16375cc  // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+    ".inst 0xc16275cd  // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    "18:"  // Main loop skip tail
+    "cbz x13, 20f\n"
+    "19:"  // Right padding loop
+    ".inst 0xc0066804  // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+    "subs x13, x13, #0x1\n"
+    ".inst 0xc0066826  // mova { z6.d-z7.d }, za.d[x11, #1]\n"
+    ".inst 0xc1aaac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1abaa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+    ".inst 0xc0046904  // mova za.d[x11, #4], { z8.d-z9.d }\n"
+    ".inst 0xc1acab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
+    ".inst 0xc0046905  // mova za.d[x11, #5], { z8.d-z9.d }\n"
+    ".inst 0xc1bacec4  // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
+    "st1b { z4.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z5.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z7.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 19b\n"
+    "20:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x16\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x14, x14, x19\n"
+    "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #12\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
new file mode 100644
index 0000000..328227f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    long unsigned int n, n_channels;
+  };
+
+  Args args = { inptr, pad_top, 5u - std::min(5u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, start_channel, valid_channels };
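+  // The 5u here is the height of the input window implied by the kernel shape:
+  // a 3-row kernel at stride 2 producing 2 output rows reads (2 - 1) * 2 + 3 = 5
+  // input rows, so pad_bottom is the part of that window not covered by
+  // pad_top + valid_input_rows.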
+
+  __asm__ __volatile__(
+    "ldr x11, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "mov x19, #0x5\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "sub x19, x19, x11\n"
+    "ldr x10, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ptrue p0.b\n"
+    "mov z12.s, #0x0\n"
+    "ldr x22, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p5.s, XZR, x22\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "whilelt p8.s, XZR, x10\n"
+    "eor p8.b, p0/Z, p8.b, p9.b\n"
+    "ldr x21, [%x[args], %[offsetof_Args_n]]\n"
+    "cbz x19, 1f\n"
+    "ld1w { z12.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "1:"  // Load bias: Done
+    "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+    "ld1sb { z27.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "mov z0.h, #0x0\n"
+    "ld1sb { z16.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "mov z13.d, z12.d\n"
+    "ld1sb { z22.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z21.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z20.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z18.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z17.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z24.s }, p0/Z, [x20]\n"
+    "incw x20\n"
+    "ld1sb { z19.s }, p0/Z, [x20]\n"
+    "ld1rh { z28.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z27.h, z27.h, z28.h\n"
+    "sub z16.h, z16.h, z28.h\n"
+    "sub z22.h, z22.h, z28.h\n"
+    "sub z21.h, z21.h, z28.h\n"
+    "trn1 z8.h, z27.h, z21.h\n"
+    "sub z20.h, z20.h, z28.h\n"
+    "sub z18.h, z18.h, z28.h\n"
+    "trn1 z7.h, z16.h, z20.h\n"
+    "sub z17.h, z17.h, z28.h\n"
+    "sub z24.h, z24.h, z28.h\n"
+    "trn1 z6.h, z17.h, z0.h\n"
+    "sub z19.h, z19.h, z28.h\n"
+    "trn1 z5.h, z24.h, z0.h\n"
+    "trn1 z4.h, z22.h, z18.h\n"
+    "trn1 z3.h, z19.h, z0.h\n"
+    "ld1rh { z21.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "ld1rw { z2.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "ld1rw { z1.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "cbz x19, 2f\n"
+    "ld1w { z1.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "2:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "ld1rw { z0.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z0.s }, p5/Z, [x19, x21, LSL #2]\n"
+    "3:"  // Load right_shift: End
+    "ldr x28, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "orr x21, x28, %x[ld_in_col], LSL #16\n"
+    "orr x21, x22, x21, LSL #22\n"
+    "ld1rw { z20.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ldr x27, [%x[args], %[offsetof_Args_inptr]]\n"
+    "mov x20, #0x5\n"
+    "add x19, x10, x11\n"
+    "ld1rw { z19.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "mov x9, #0x0\n"
+    "ldr x26, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "lsl x21, x21, #0x0\n"
+    "sub x20, x20, x19\n"
+    "mov x19, x27\n"
+    "4:"  // Issue prefetches
+    "subs x20, x20, #0x1\n"
+    ".inst 0xf8b54a7c  // rprfm pldstrm, x21, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 4b\n"
+    "ldr x21, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x27, x10, x19, x27\n"
+    ".inst 0xc0042980  // mova za.d[x9, #0], { z12.d-z13.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0042981  // mova za.d[x9, #1], { z12.d-z13.d }\n"
+    "mov x25, #0x2\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "ldp x24, x23, [x21], #0x10\n"
+    "ldp x22, x21, [x19], #0x10\n"
+    "cbz x20, 6f\n"
+    "cmp x20, x25\n"
+    "csel x19, x20, x25, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x25, x25, x19\n"
+    "cbz x20, 6f\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "and x25, x20, #0x1\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    "sub x26, x26, x20\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "5:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "bgt 5b\n"
+    "6:"  // Left padding: End
+    "adds XZR, x10, x11\n"
+    "bne 11f\n"
+    "cbz x25, 9f\n"
+    "cmp x25, #0x1\n"
+    "sub x28, x28, x25\n"
+    "beq 8f\n"
+    "7:"  // Unpadded: 2 priming loads
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16835c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xc16635e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+    "8:"  // Unpadded: 1 priming loads
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "9:"  // Unpadded: 0 priming loads
+    "add x20, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "sub x28, x28, #0x2\n"
+    "ld1b { z18.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "sub x26, x26, #0x1\n"
+    "ld1b { z15.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "lsr x19, x28, #0x1\n"
+    "ld1b { z17.s }, p5/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "cmp x19, x26\n"
+    "ld1b { z16.s }, p5/Z, [x20]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "csel x20, x19, x26, LT\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "and x28, x28, #0x1\n"
+    "sub x26, x26, x20\n"
+    "cbz x20, 16f\n"
+    "10:"  // Unpadded: Main loop
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    "sub z18.h, z18.h, z21.h\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "add x9, x9, #0x1\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "ld1b { z14.s }, p5/Z, [x27]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "ld1b { z18.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    "ld1b { z15.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z14.h, z14.h, z21.h\n"
+    "sub z18.h, z18.h, z21.h\n"
+    "ld1b { z17.s }, p5/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z15.h, z15.h, z21.h\n"
+    "sub z17.h, z17.h, z21.h\n"
+    "ld1b { z16.s }, p5/Z, [x19]\n"
+    "sub z16.h, z16.h, z21.h\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "bgt 10b\n"
+    "b 16f\n"
+    "11:"  // Padded
+    "cbz x25, 14f\n"
+    "cmp x25, #0x1\n"
+    "sub x28, x28, x25\n"
+    "beq 13f\n"
+    "12:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16835c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc16635e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+    "13:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "14:"  // Padded: 0 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "sub x28, x28, #0x2\n"
+    "sub x26, x26, #0x1\n"
+    "lsr x19, x28, #0x1\n"
+    "mov z16.d, z16.d\n"
+    "cmp x19, x26\n"
+    "csel x20, x19, x26, LT\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "and x28, x28, #0x1\n"
+    "sub x26, x26, x20\n"
+    "cbz x20, 16f\n"
+    "15:"  // Padded: Main loop
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "add x9, x9, #0x1\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    "bgt 15b\n"
+    "16:"  // Main loop tail
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "mov x12, #0x4\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add x9, x9, #0x1\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x27, x27, %x[ld_in_col]\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc16735c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    ".inst 0xc16535e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+    "cbz x28, 17f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25305504  // psel p4.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p4/Z, [x27]\n"
+    "sub z14.h, p4/M, z14.h, z21.h\n"
+    "add x19, x27, %x[ld_in_row]\n"
+    ".inst 0x25705503  // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p3/Z, [x19]\n"
+    "sub z18.h, p3/M, z18.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b05502  // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p2/Z, [x19]\n"
+    "sub z15.h, p2/M, z15.h, z21.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f05501  // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "sub z17.h, p1/M, z17.h, z21.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25305500  // psel p0.s, p5.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "sub z16.h, p0/M, z16.h, z21.h\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc16435c8  // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+    "sub x26, x26, #0x1\n"
+    ".inst 0xc16335e8  // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    ".inst 0xc16835c9  // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc16635e9  // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+    "add x9, x9, #0x1\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "17:"  // Tail input: End
+    "cbz x26, 19f\n"
+    "18:"  // Right padding loop
+    ".inst 0xc0062818  // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+    ".inst 0xc1a1a418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+    "add x9, x9, #0x1\n"
+    ".inst 0xc1a0a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+    "subs x26, x26, #0x1\n"
+    ".inst 0xc0042982  // mova za.d[x9, #2], { z12.d-z13.d }\n"
+    ".inst 0xc1a2a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+    ".inst 0xc1b3c698  // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+    "st1b { z24.s }, p5, [x24]\n"
+    "add x24, x24, x22\n"
+    "st1b { z25.s }, p5, [x23]\n"
+    "add x23, x23, x21\n"
+    "bgt 18b\n"
+    "19:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_n] "I" (offsetof(Args, n)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..e412216
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za : public PlanarStrategy<uint8_t, int8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, int8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 3u, kernel_cols = 3u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..7a9724c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
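+  // As in the 2-row variant, 9u is the input window height for this kernel:
+  // (4 - 1) * 2 + 3 = 9 input rows feed 4 output rows at stride 2, and
+  // pad_bottom covers whatever part of that window lies below the valid input.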
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x9\n"
+    "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z5.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x7\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x17\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x8\n"
+    "addvl SP, SP, #-6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z5.h, p2/M, z5.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z0.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z0.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1rh { z13.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "incw x21\n"
+    "mov z17.h, #0x0\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "mov x19, x21\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "addvl x20, SP, #6\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "incw x21\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "addvl x20, x20, #-2\n"
+    "mov x19, x21\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #3\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z24.h, z24.h, z13.h\n"
+    "sub z25.h, z25.h, z13.h\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "sub z16.h, z16.h, z13.h\n"
+    "addvl x20, x20, #-2\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "mov z1.d, z0.d\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "addvl x20, x20, #-2\n"
+    "mov z2.d, z0.d\n"
+    "mov z3.d, z0.d\n"
+    "trn1 z10.h, z24.h, z25.h\n"
+    "st1h { z10.h }, p2, [x20]\n"
+    "trn1 z11.h, z16.h, z17.h\n"
+    "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z7.s }, p1/Z, [x19, x16, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x15, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x17, x22, LSL #22\n"
+    "mov x21, #0x9\n"
+    "add x20, x8, x7\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x8, x14\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x14, x8, x19, x14\n"
+    ".inst 0xc0046c00  // mova za.d[x11, #0], { z0.d-z3.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0046c01  // mova za.d[x11, #1], { z0.d-z3.d }\n"
+    "mov x21, #0x2\n"
+    "ldp x10, x9, [x24], #0x10\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "ldp x28, x27, [x19], #0x10\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    "ldp x26, x25, [x24], #0x10\n"
+    "ldp x24, x23, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "and x21, x20, #0x1\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "sub x13, x13, x20\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x8, x7\n"
+    "bne 12f\n"
+    "cbz x21, 10f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 2 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #4\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "9:"  // Unpadded: 1 priming loads
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "addvl x19, SP, #2\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "10:"  // Unpadded: 0 priming loads
+    "cmp x15, #0x2\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "blt 18f\n"
+    "add x20, x14, %x[ld_in_row]\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "sub x15, x15, #0x2\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "sub x13, x13, #0x1\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "lsr x19, x15, #0x1\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "cmp x19, x13\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "csel x22, x19, x13, LT\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    "and x15, x15, #0x1\n"
+    "sub x13, x13, x22\n"
+    "cbz x22, 17f\n"
+    "11:"  // Unpadded: Main loop
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "add x21, x14, %x[ld_in_row]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "addvl x20, SP, #2\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    "ld1b { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "ld1b { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z12.h, z12.h, z5.h\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "ld1b { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z5.h\n"
+    "ld1b { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "ld1b { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z5.h\n"
+    "ld1b { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z5.h\n"
+    ".inst 0xa0402a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "ld1b { z12.s }, p1/Z, [x14]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "ld1b { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "ld1b { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x26, x26, x24\n"
+    "ld1b { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z12.h, z12.h, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "add z13.h, z13.h, z5.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "add z14.h, z14.h, z5.h\n"
+    "add z15.h, z15.h, z5.h\n"
+    "add z16.h, z16.h, z5.h\n"
+    "bgt 11b\n"
+    "b 17f\n"
+    "12:"  // Padded
+    "cbz x21, 15f\n"
+    "cmp x21, #0x1\n"
+    "sub x15, x15, x21\n"
+    "beq 14f\n"
+    "13:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "addvl x19, SP, #4\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "14:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "addvl x19, SP, #2\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "15:"  // Padded: 0 priming loads
+    "cmp x15, #0x2\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "blt 18f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "sub x15, x15, #0x2\n"
+    "sub x13, x13, #0x1\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "lsr x19, x15, #0x1\n"
+    "cmp x19, x13\n"
+    "mov z16.d, z16.d\n"
+    "csel x21, x19, x13, LT\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "and x15, x15, #0x1\n"
+    "sub x13, x13, x21\n"
+    "cbz x21, 17f\n"
+    "16:"  // Padded: Main loop
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x20, x14, %x[ld_in_row]\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "ld1b { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "mov x12, #0x8\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "addvl x19, SP, #2\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "add x11, x11, #0x1\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "mov z16.d, z16.d\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "subs x21, x21, #0x1\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "bgt 16b\n"
+    "17:"  // Main loop tail
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "addvl x19, SP, #4\n"
+    "mov x12, #0x0\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "mov x12, #0x4\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #2\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add x11, x11, #0x1\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add x14, x14, %x[ld_in_col]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "18:"  // Main loop skip tail
+    "cbz x15, 19f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z12.s }, p0/Z, [x14]\n"
+    "add z12.h, p0/M, z12.h, z5.h\n"
+    "add x19, x14, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z5.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z17.h, p0/M, z17.h, z5.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z5.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "addvl x19, SP, #4\n"
+    ".inst 0xc17a7588  // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+    "sub x13, x13, #0x1\n"
+    ".inst 0xc17b75a8  // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+    ".inst 0xa0402a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    ".inst 0xc17a7589  // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    ".inst 0xc17b75a9  // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "19:"  // Tail input: End
+    "cbz x13, 21f\n"
+    "20:"  // Right padding loop
+    ".inst 0xc0066c1c  // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+    ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+    "add x11, x11, #0x1\n"
+    ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+    "subs x13, x13, #0x1\n"
+    ".inst 0xc0046c02  // mova za.d[x11, #2], { z0.d-z3.d }\n"
+    ".inst 0xc1a4ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+    ".inst 0xc1b7cf7c  // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
+    "st1b { z28.s }, p1, [x10]\n"
+    "add x10, x10, x28\n"
+    "st1b { z29.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z30.s }, p1, [x26]\n"
+    "add x26, x26, x24\n"
+    "st1b { z31.s }, p1, [x25]\n"
+    "add x25, x25, x23\n"
+    "bgt 20b\n"
+    "21:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x16\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "whilelt p1.s, x16, x17\n"
+    "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x14, x14, x19\n"
+    "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #6\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za.hpp
new file mode 100644
index 0000000..6071197
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za : public PlanarStrategy<uint8_t, int8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, int8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 1u, stride_cols = 1u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..d697064
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0x8\n"
+    "ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z25.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x5\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x7\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x6\n"
+    "addvl SP, SP, #-30\n"
+    "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z25.h, p2/M, z25.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z6.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z6.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x21, x23\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "ld1rh { z12.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "mov z2.h, #0x0\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "incw x23\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "mov x21, x23\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "addvl x20, SP, #30\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "incw x23\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "addvl x20, x20, #-6\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "mov x21, x23\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "incw x23\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "addvl x20, x20, #-6\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "mov x21, x23\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "incw x23\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "addvl x20, x20, #-6\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "mov x21, x23\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "ld1sb { z18.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "ld1sb { z17.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "ld1sb { z21.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "ld1sb { z16.s }, p2/Z, [x21]\n"
+    "incw x21, ALL, MUL #5\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "ld1sb { z15.s }, p2/Z, [x21]\n"
+    "sub z18.h, z18.h, z12.h\n"
+    "addvl x20, x20, #-6\n"
+    "sub z17.h, z17.h, z12.h\n"
+    "sub z21.h, z21.h, z12.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z12.h\n"
+    "sub z15.h, z15.h, z12.h\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "mov z7.d, z6.d\n"
+    "trn1 z0.h, z2.h, z18.h\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "trn1 z8.h, z18.h, z17.h\n"
+    "trn1 z4.h, z17.h, z21.h\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "trn1 z5.h, z21.h, z16.h\n"
+    "trn1 z10.h, z16.h, z15.h\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "addvl x20, x20, #-6\n"
+    "trn1 z11.h, z15.h, z2.h\n"
+    "st1h { z0.h }, p2, [x20]\n"
+    "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+    "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+    "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+    "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z3.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z1.s }, p1/Z, [x19, x17, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x16, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x7, x22, LSL #22\n"
+    "mov x21, #0x8\n"
+    "add x20, x6, x5\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x11, #0x0\n"
+    "mov x8, #0x8\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x6, x15\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x15, x6, x19, x15\n"
+    ".inst 0xc00468c0  // mova za.d[x11, #0], { z6.d-z7.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc00468c1  // mova za.d[x11, #1], { z6.d-z7.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x13, x4, [x24], #0x10\n"
+    ".inst 0xc00468c2  // mova za.d[x11, #2], { z6.d-z7.d }\n"
+    "ldp x10, x9, [x19], #0x10\n"
+    ".inst 0xc00468c3  // mova za.d[x11, #3], { z6.d-z7.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc00468c4  // mova za.d[x11, #4], { z6.d-z7.d }\n"
+    "ldp x28, x27, [x24], #0x10\n"
+    ".inst 0xc00468c5  // mova za.d[x11, #5], { z6.d-z7.d }\n"
+    "ldp x26, x25, [x19], #0x10\n"
+    ".inst 0xc00468c6  // mova za.d[x11, #6], { z6.d-z7.d }\n"
+    ".inst 0xc00468c7  // mova za.d[x11, #7], { z6.d-z7.d }\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "sub x14, x14, x20\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x6, x5\n"
+    "bne 14f\n"
+    "cbz x21, 12f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 11f\n"
+    "cmp x21, #0x2\n"
+    "beq 10f\n"
+    "cmp x21, #0x3\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 4 priming loads
+    "add x20, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z29.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z29.h, z16.h, z29.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    "9:"  // Unpadded: 3 priming loads
+    "add x21, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x20, SP, #18\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    "10:"  // Unpadded: 2 priming loads
+    "add x22, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x21, SP, #12\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "addvl x20, SP, #18\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    "11:"  // Unpadded: 1 priming loads
+    "add x23, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "addvl x22, SP, #6\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x21, SP, #12\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x20, SP, #18\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x23]\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x23]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    "12:"  // Unpadded: 0 priming loads
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 22f\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x15]\n"
+    "sub x16, x16, #0x1\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z17.h, z16.h\n"
+    "sub x14, x14, #0x1\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "cmp x16, x14\n"
+    "add z27.h, z27.h, z25.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    "csel x24, x16, x14, LT\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z28.h, z28.h, z25.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z29.h, z17.h, z16.h\n"
+    "add z29.h, z29.h, z25.h\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "sub x14, x14, x24\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add z30.h, z30.h, z25.h\n"
+    "cbz x24, 21f\n"
+    "13:"  // Unpadded: Main loop
+    "addvl x23, SP, #6\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x22, SP, #12\n"
+    "ld1b { z23.s }, p1/Z, [x15]\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ae0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+    "addvl x21, SP, #18\n"
+    "addvl x20, SP, #24\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    "ld1b { z22.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "subs x24, x24, #0x1\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ae4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aea  // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    "trn1 z27.h, z23.h, z22.h\n"
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    "add z27.h, z27.h, z25.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    "trn1 z28.h, z21.h, z20.h\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    "add z28.h, z28.h, z25.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "add z29.h, z29.h, z25.h\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "add z30.h, z30.h, z25.h\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 13b\n"
+    "b 21f\n"
+    "14:"  // Padded
+    "cbz x21, 19f\n"
+    "cmp x21, #0x1\n"
+    "sub x16, x16, x21\n"
+    "beq 18f\n"
+    "cmp x21, #0x2\n"
+    "beq 17f\n"
+    "cmp x21, #0x3\n"
+    "beq 16f\n"
+    "15:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x20, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    "16:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #18\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #24\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    "17:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "addvl x21, SP, #12\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x20, SP, #18\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    "addvl x19, SP, #24\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    "18:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "addvl x22, SP, #6\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "trn1 z29.h, z18.h, z16.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x21, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    "addvl x19, SP, #24\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    "19:"  // Padded: 0 priming loads
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "cbz x16, 22f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x15]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z27.h, z19.h, z18.h\n"
+    "trn1 z28.h, z17.h, z16.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    "sub x16, x16, #0x1\n"
+    "sub x14, x14, #0x1\n"
+    "cmp x16, x14\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    "csel x24, x16, x14, LT\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    "sub x14, x14, x24\n"
+    "cbz x24, 21f\n"
+    "20:"  // Padded: Main loop
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z23.s }, p0/Z, [x15]\n"
+    "add z23.h, p0/M, z23.h, z25.h\n"
+    "add x23, x15, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z22.s }, p0/Z, [x23]\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "addvl x21, SP, #12\n"
+    "add z22.h, p0/M, z22.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    "ld1b { z21.s }, p0/Z, [x23]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    "add z21.h, p0/M, z21.h, z25.h\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    "mov x12, #0x4\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z20.s }, p0/Z, [x23]\n"
+    "add z20.h, p0/M, z20.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "subs x24, x24, #0x1\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z19.s }, p0/Z, [x23]\n"
+    "add z19.h, p0/M, z19.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "add x15, x15, %x[ld_in_col]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    "ld1b { z18.s }, p0/Z, [x23]\n"
+    "add z18.h, p0/M, z18.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    "ld1b { z17.s }, p0/Z, [x23]\n"
+    "add z17.h, p0/M, z17.h, z25.h\n"
+    "add x23, x23, %x[ld_in_row]\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    "ld1b { z16.s }, p0/Z, [x23]\n"
+    "add z16.h, p0/M, z16.h, z25.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402be0  // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+    "trn1 z27.h, z23.h, z22.h\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412be4  // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+    "trn1 z28.h, z21.h, z20.h\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xa0422bea  // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+    "trn1 z29.h, z19.h, z18.h\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "trn1 z30.h, z17.h, z16.h\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 20b\n"
+    "21:"  // Main loop tail
+    "addvl x22, SP, #6\n"
+    ".inst 0xc1687768  // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+    "addvl x21, SP, #12\n"
+    ".inst 0xc1607769  // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402ac0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #18\n"
+    "addvl x19, SP, #24\n"
+    ".inst 0xc168776a  // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776b  // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402aa0  // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1657788  // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1647789  // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412ac4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+    ".inst 0xc168776c  // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776d  // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a80  // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc165778a  // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778b  // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412aa4  // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+    ".inst 0xc16b77a8  // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77a9  // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aca  // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+    ".inst 0xc168776e  // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc160776f  // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xa1402a60  // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc165778c  // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778d  // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a84  // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+    ".inst 0xc16b77aa  // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ab  // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422aaa  // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+    ".inst 0xc165778e  // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc164778f  // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xa0412a64  // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+    ".inst 0xc16b77ac  // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77ad  // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a8a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+    ".inst 0xc16b77ae  // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a77af  // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
+    ".inst 0xa0422a6a  // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+    ".inst 0xc1681768  // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
+    ".inst 0xc1601769  // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+    ".inst 0xc1651788  // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
+    ".inst 0xc1641789  // sdot za.s[x8, 1], { z28.h-z29.h }, z4.h\n"
+    ".inst 0xc16b17a8  // sdot za.s[x8, 0], { z29.h-z30.h }, z11.h\n"
+    ".inst 0xc16a17a9  // sdot za.s[x8, 1], { z29.h-z30.h }, z10.h\n"
+    "add x8, x8, #0x2\n"
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "22:"  // Main loop skip tail
+    "cbz x14, 24f\n"
+    "23:"  // Right padding loop
+    ".inst 0xc006680c  // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+    "add x8, x8, #0x2\n"
+    "subs x14, x14, #0x1\n"
+    ".inst 0xc006682e  // mova { z14.d-z15.d }, za.d[x11, #1]\n"
+    ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+    "add x11, x11, #0x2\n"
+    ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+    ".inst 0xc00408c0  // mova za.d[x8, #0], { z6.d-z7.d }\n"
+    ".inst 0xc1a9ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+    ".inst 0xc00408c1  // mova za.d[x8, #1], { z6.d-z7.d }\n"
+    ".inst 0xc1bfcf0c  // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
+    "st1b { z12.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z14.s }, p1, [x4]\n"
+    "add x4, x4, x9\n"
+    "st1b { z13.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "st1b { z15.s }, p1, [x27]\n"
+    "add x27, x27, x25\n"
+    "bgt 23b\n"
+    "24:"  // End
+    "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x23, ALL, MUL #16\n"
+    "incw x23, ALL, MUL #9\n"
+    "str x23, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x17\n"
+    "whilelt p1.s, x17, x7\n"
+    "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x15, x15, x19\n"
+    "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #30\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za.hpp
new file mode 100644
index 0000000..6949e69
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+);
+
+class sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za : public PlanarStrategy<uint8_t, int8_t>
+{
+  using Parent = PlanarStrategy<uint8_t, int8_t>;
+
+  public:
+  using return_type = uint8_t;
+  constexpr static auto output_rows = 4u;
+  constexpr static auto kernel_rows = 5u, kernel_cols = 5u;
+  constexpr static auto stride_rows = 2u, stride_cols = 2u;
+  constexpr static auto vl_type = arm_gemm::VLType::SME;
+
+  sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za(const CPUInfo *)
+  : Parent(kernel_rows, kernel_cols, stride_rows, stride_cols, output_rows, vl_type)
+  {
+  }
+
+  typename Parent::KernelType get_kernel(void) const override
+  {
+    return sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl;
+  }
+};
+
+}  // namespace depthwise
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
new file mode 100644
index 0000000..8cdc94d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
+  const uint8_t *inptr,
+  size_t ld_in_row,
+  size_t ld_in_col,
+  size_t ld_in_vl,
+  unsigned int pad_top,
+  unsigned int valid_input_rows,
+  unsigned int pad_left,
+  unsigned int valid_input_cols,
+  const int8_t *weights,
+  uint8_t **outptrs,
+  const size_t *outlds,
+  const size_t *outvllds,
+  unsigned int output_cols,
+  unsigned int start_channel,
+  unsigned int valid_channels,
+  const arm_gemm::Requantize32 &qp
+)
+{
+  struct Args
+  {
+    const uint8_t *inptr;
+    size_t ld_in_vl;
+    long unsigned int pad_top, pad_bottom, pad_left;
+    const int8_t *weights;
+    long unsigned int input_cols, output_cols;
+    uint8_t **outptrs;
+    const size_t *ld_out_cols;
+    const size_t *ld_out_vls;
+    long unsigned int current_channel, n_channels;
+  };
+
+  Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels };
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+    "ptrue p2.b\n"
+    "mov x19, #0xb\n"
+    "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+    "ld1rh { z9.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+    "sub x19, x19, x4\n"
+    ".inst 0x25207812  // ptrue pn10.b\n"
+    "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+    "whilelt p1.s, XZR, x6\n"
+    "whilelt p9.s, XZR, x19\n"
+    "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+    "whilelt p8.s, XZR, x5\n"
+    "addvl SP, SP, #-15\n"
+    "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+    "neg z9.h, p2/M, z9.h\n"
+    "eor p8.b, p2/Z, p8.b, p9.b\n"
+    "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+    "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+    "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+    "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+    "1:"  // Channel loop
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+    "mov z28.s, #0x0\n"
+    "cbz x19, 2f\n"
+    "ld1w { z28.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "2:"  // Load bias: Done
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "mov x19, x21\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "incw x21\n"
+    "mov z14.h, #0x0\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "addvl x20, SP, #15\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "mov x19, x21\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "incw x21\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "addvl x20, x20, #-3\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "mov x19, x21\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "incw x21\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "addvl x20, x20, #-3\n"
+    "mov x19, x21\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "incw x21\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "addvl x20, x20, #-3\n"
+    "mov x19, x21\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "ld1sb { z12.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "ld1sb { z25.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "ld1sb { z24.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "sub z12.h, z12.h, z18.h\n"
+    "ld1sb { z17.s }, p2/Z, [x19]\n"
+    "incw x19, ALL, MUL #5\n"
+    "sub z25.h, z25.h, z18.h\n"
+    "sub z24.h, z24.h, z18.h\n"
+    "ld1sb { z16.s }, p2/Z, [x19]\n"
+    "sub z17.h, z17.h, z18.h\n"
+    "sub z16.h, z16.h, z18.h\n"
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+    "addvl x20, x20, #-3\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "mov z29.d, z28.d\n"
+    "mov z30.d, z28.d\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "mov z31.d, z28.d\n"
+    "trn1 z2.h, z12.h, z25.h\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "addvl x20, x20, #-3\n"
+    "trn1 z10.h, z24.h, z17.h\n"
+    "trn1 z0.h, z16.h, z14.h\n"
+    "st1h { z2.h }, p2, [x20]\n"
+    "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+    "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+    "cbz x19, 3f\n"
+    "ld1w { z3.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "3:"  // Load mul: End
+    "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+    "cbz x19, 4f\n"
+    "ld1w { z1.s }, p1/Z, [x19, x7, LSL #2]\n"
+    "4:"  // Load right_shift: End
+    "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+    "sub x19, x17, #0x1\n"
+    "orr x22, x19, %x[ld_in_col], LSL #16\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "orr x22, x6, x22, LSL #22\n"
+    "mov x21, #0xb\n"
+    "add x20, x5, x4\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+    "mov x8, #0x0\n"
+    "lsl x22, x22, #0x0\n"
+    "sub x21, x21, x20\n"
+    "madd x19, x19, x5, x16\n"
+    "5:"  // Issue prefetches
+    "subs x21, x21, #0x1\n"
+    ".inst 0xf8b64a7c  // rprfm pldstrm, x22, [x19]\n"
+    "add x19, x19, %x[ld_in_col]\n"
+    "bgt 5b\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "lsl x19, %x[ld_in_row], #0x0\n"
+    "msub x16, x5, x19, x16\n"
+    ".inst 0xc0040f80  // mova za.d[x8, #0], { z28.d-z31.d }\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+    ".inst 0xc0040f81  // mova za.d[x8, #1], { z28.d-z31.d }\n"
+    "mov x21, #0x4\n"
+    "ldp x14, x13, [x24], #0x10\n"
+    ".inst 0xc0040f82  // mova za.d[x8, #2], { z28.d-z31.d }\n"
+    "ldp x11, x10, [x19], #0x10\n"
+    ".inst 0xc0040f83  // mova za.d[x8, #3], { z28.d-z31.d }\n"
+    "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "ldp x9, x28, [x24], #0x10\n"
+    "ldp x27, x26, [x19], #0x10\n"
+    "cbz x20, 7f\n"
+    "cmp x20, x21\n"
+    "csel x19, x20, x21, LT\n"
+    "sub x20, x20, x19\n"
+    "sub x21, x21, x19\n"
+    "cbz x20, 7f\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "and x21, x20, #0x1\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "add x20, x20, #0x1\n"
+    "lsr x20, x20, #0x1\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    "sub x15, x15, x20\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "6:"  // Left padding
+    "subs x20, x20, #0x1\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "bgt 6b\n"
+    "7:"  // Left padding: End
+    "adds XZR, x5, x4\n"
+    "bne 14f\n"
+    "cbz x21, 12f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 11f\n"
+    "cmp x21, #0x2\n"
+    "beq 10f\n"
+    "cmp x21, #0x3\n"
+    "beq 9f\n"
+    "8:"  // Unpadded: 4 priming loads
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x19, SP, #12\n"
+    "ld1b { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "9:"  // Unpadded: 3 priming loads
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x19, SP, #9\n"
+    "ld1b { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "10:"  // Unpadded: 2 priming loads
+    "add x21, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x20, SP, #6\n"
+    "ld1b { z21.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #12\n"
+    "ld1b { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "11:"  // Unpadded: 1 priming loads
+    "add x21, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "addvl x20, SP, #3\n"
+    "ld1b { z21.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "addvl x19, SP, #12\n"
+    "ld1b { z20.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z19.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z18.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x21]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x21]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p1/Z, [x21]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "12:"  // Unpadded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 22f\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "sub x17, x17, #0x2\n"
+    "ld1b { z21.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "sub x15, x15, #0x1\n"
+    "ld1b { z12.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "lsr x19, x17, #0x1\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z20.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "cmp x19, x15\n"
+    "ld1b { z13.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "csel x25, x19, x15, LT\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z19.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "and x17, x17, #0x1\n"
+    "ld1b { z17.s }, p1/Z, [x20]\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    "ld1b { z16.s }, p1/Z, [x20]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    "sub x15, x15, x25\n"
+    "cbz x25, 21f\n"
+    "13:"  // Unpadded: Main loop
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x24, SP, #6\n"
+    "addvl x23, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402b02  // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+    "add x22, x16, %x[ld_in_row]\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "addvl x20, SP, #9\n"
+    "subs x25, x25, #0x1\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z21.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z12.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "ld1b { z20.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z13.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "ld1b { z19.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z13.h, z13.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "ld1b { z18.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x22]\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "ld1b { z17.s }, p1/Z, [x22]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add x22, x22, %x[ld_in_row]\n"
+    "add z15.h, z15.h, z9.h\n"
+    ".inst 0xa1402aa2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    "ld1b { z16.s }, p1/Z, [x22]\n"
+    "mov z16.d, z16.d\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add z16.h, z16.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "ld1b { z11.s }, p1/Z, [x16]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "ld1b { z21.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "ld1b { z12.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "ld1b { z20.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "add x28, x28, x26\n"
+    "ld1b { z13.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "add z11.h, z11.h, z9.h\n"
+    "ld1b { z19.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "add z12.h, z12.h, z9.h\n"
+    "ld1b { z14.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "add z13.h, z13.h, z9.h\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1b { z18.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "add z14.h, z14.h, z9.h\n"
+    "ld1b { z15.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "ld1b { z17.s }, p1/Z, [x19]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "add z15.h, z15.h, z9.h\n"
+    "ld1b { z16.s }, p1/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "add z16.h, z16.h, z9.h\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "bgt 13b\n"
+    "b 21f\n"
+    "14:"  // Padded
+    "cbz x21, 19f\n"
+    "cmp x21, #0x1\n"
+    "sub x17, x17, x21\n"
+    "beq 18f\n"
+    "cmp x21, #0x2\n"
+    "beq 17f\n"
+    "cmp x21, #0x3\n"
+    "beq 16f\n"
+    "15:"  // Padded: 4 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x20]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x20]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x19, SP, #12\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "16:"  // Padded: 3 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x20, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x20]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x20]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x20]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x20]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x20]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x20]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x20]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x20]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x20]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x19, SP, #9\n"
+    "add x20, x20, %x[ld_in_row]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x20]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "17:"  // Padded: 2 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x20, SP, #6\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "18:"  // Padded: 1 priming loads
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "addvl x20, SP, #3\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "addvl x19, SP, #12\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "19:"  // Padded: 0 priming loads
+    "cmp x17, #0x2\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "blt 22f\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "sub x17, x17, #0x2\n"
+    "sub x15, x15, #0x1\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "lsr x19, x17, #0x1\n"
+    "cmp x19, x15\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "csel x24, x19, x15, LT\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    "and x17, x17, #0x1\n"
+    "sub x15, x15, x24\n"
+    "cbz x24, 21f\n"
+    "20:"  // Padded: Main loop
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    "addvl x21, SP, #3\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ac2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+    "addvl x20, SP, #9\n"
+    "subs x24, x24, #0x1\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402aa2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "mov x12, #0x0\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "mov z16.d, z16.d\n"
+    "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "mov x12, #0x4\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "mov z16.d, z16.d\n"
+    "bgt 20b\n"
+    "21:"  // Main loop tail
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "addvl x23, SP, #6\n"
+    "addvl x22, SP, #12\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ae2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "add x21, x16, %x[ld_in_row]\n"
+    "addvl x20, SP, #3\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402ac2  // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+    "addvl x19, SP, #9\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x21]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    "ld1b { z12.s }, p0/Z, [x21]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "mov x12, #0x4\n"
+    "ld1b { z20.s }, p0/Z, [x21]\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "ld1b { z13.s }, p0/Z, [x21]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x21]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x21]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x21]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x21]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x21]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    "add x21, x21, %x[ld_in_row]\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "ld1b { z16.s }, p0/Z, [x21]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    "add x16, x16, %x[ld_in_col]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xa1402be2  // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "22:"  // Main loop skip tail
+    "cbz x17, 23f\n"  // Skip remainder inputs
+    "mov x12, #0x0\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z11.s }, p0/Z, [x16]\n"
+    "add z11.h, p0/M, z11.h, z9.h\n"
+    "add x19, x16, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z21.s }, p0/Z, [x19]\n"
+    "add z21.h, p0/M, z21.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z12.s }, p0/Z, [x19]\n"
+    "add z12.h, p0/M, z12.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z20.s }, p0/Z, [x19]\n"
+    "add z20.h, p0/M, z20.h, z9.h\n"
+    "mov x12, #0x4\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    "trn1 z11.h, z11.h, z21.h\n"
+    "trn1 z12.h, z12.h, z20.h\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z13.s }, p0/Z, [x19]\n"
+    "add z13.h, p0/M, z13.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z19.s }, p0/Z, [x19]\n"
+    "add z19.h, p0/M, z19.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "ld1b { z14.s }, p0/Z, [x19]\n"
+    "add z14.h, p0/M, z14.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25f04500  // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+    "ld1b { z18.s }, p0/Z, [x19]\n"
+    "mov x12, #0x8\n"
+    "add z18.h, p0/M, z18.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25304500  // psel p0.s, p1.s/Z, p8.s[w12]\n"
+    "ld1b { z15.s }, p0/Z, [x19]\n"
+    "add z15.h, p0/M, z15.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25704500  // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+    "ld1b { z17.s }, p0/Z, [x19]\n"
+    "add z17.h, p0/M, z17.h, z9.h\n"
+    "add x19, x19, %x[ld_in_row]\n"
+    ".inst 0x25b04500  // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+    "trn1 z13.h, z13.h, z19.h\n"
+    "trn1 z14.h, z14.h, z18.h\n"
+    "ld1b { z16.s }, p0/Z, [x19]\n"
+    "add z16.h, p0/M, z16.h, z9.h\n"
+    "trn1 z15.h, z15.h, z17.h\n"
+    "addvl x20, SP, #6\n"
+    ".inst 0xc1721568  // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+    "mov z16.d, z16.d\n"
+    "addvl x19, SP, #12\n"
+    "sub x15, x15, #0x1\n"
+    ".inst 0xc17a1588  // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a82  // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+    ".inst 0xc17015a8  // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+    ".inst 0xc1721569  // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    ".inst 0xc17a1589  // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xa1402a62  // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    ".inst 0xc172156a  // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc17a158a  // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    ".inst 0xc17015a9  // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+    "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    ".inst 0xc17015aa  // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+    "add x8, x8, #0x1\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    "23:"  // Tail input: End
+    "cbz x15, 25f\n"
+    "24:"  // Right padding loop
+    ".inst 0xc0060c04  // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+    ".inst 0xc1a3ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
+    "add x8, x8, #0x1\n"
+    ".inst 0xc1a1aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+    "subs x15, x15, #0x1\n"
+    ".inst 0xc0040f84  // mova za.d[x8, #4], { z28.d-z31.d }\n"
+    ".inst 0xc1a8ab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+    ".inst 0xc1b7cf44  // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
+    "st1b { z4.s }, p1, [x14]\n"
+    "add x14, x14, x11\n"
+    "st1b { z5.s }, p1, [x13]\n"
+    "add x13, x13, x10\n"
+    "st1b { z6.s }, p1, [x9]\n"
+    "add x9, x9, x27\n"
+    "st1b { z7.s }, p1, [x28]\n"
+    "add x28, x28, x26\n"
+    "bgt 24b\n"
+    "25:"  // End
+    "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "incw x21, ALL, MUL #16\n"
+    "incw x21, ALL, MUL #9\n"
+    "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+    "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+    "incw x7\n"
+    "whilelt p1.s, x7, x6\n"
+    "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "add x16, x16, x19\n"
+    "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+    "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+    "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+    "ldp x22, x21, [x24, #0x0]\n"
+    "ldp x20, x19, [x23, #0x0]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x0]\n"
+    "ldp x22, x21, [x24, #0x10]\n"
+    "ldp x20, x19, [x23, #0x10]\n"
+    "add x22, x22, x20\n"
+    "add x21, x21, x19\n"
+    "stp x22, x21, [x24, #0x10]\n"
+    "b.any 1b\n"
+    "addvl SP, SP, #15\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace depthwise
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME2)
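The output stage of the depthwise kernel above requantizes each int32 accumulator with an SQDMULH / SRSHL / ADD / SCLAMP sequence before storing int8 results. As a rough per-lane scalar sketch of that sequence (illustration only: the function and parameter names are hypothetical, the register-to-field mapping is inferred from the asm operands, and the sign convention for the shift in arm_gemm::Requantize32 is hedged):

// Scalar sketch of the per-layer requantization tail above, assuming
// per-layer quantization (per_channel_muls unused). Not part of the patch.
#include <algorithm>
#include <cstdint>

int8_t requantize_lane(int32_t acc, int32_t per_layer_mul, int32_t right_shift,
                       int32_t c_offset, int32_t minval, int32_t maxval)
{
    // SQDMULH: saturating doubling multiply, keep the high 32 bits.
    // (2*a*b) >> 32 is equivalent to (a*b) >> 31 in 64-bit arithmetic.
    const int64_t prod = static_cast<int64_t>(acc) * per_layer_mul;
    int32_t value = static_cast<int32_t>(prod >> 31);

    // SRSHL by a negative amount is a rounding arithmetic shift right;
    // the kernel passes the (negated) shift in a vector register.
    if (right_shift > 0)
    {
        value = static_cast<int32_t>(
            (static_cast<int64_t>(value) + (1LL << (right_shift - 1))) >> right_shift);
    }

    // Add the output offset, then SCLAMP against the quantized min/max.
    value += c_offset;
    return static_cast<int8_t>(std::min(std::max(value, minval), maxval));
}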
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..250d92c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst : public DepthfirstStrategy<__fp16, __fp16>
+{
+  using Parent = DepthfirstStrategy<__fp16, __fp16>;
+
+  const static auto pooling_type = PoolingType::AVERAGE;
+  const static auto pool_rows = 3u, pool_cols = 3u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
+
+  sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
+
+  Parent::KernelType get_kernel(void) const { return sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
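The strategy above declares a 3x3 pool at stride 1 producing a 2x2 output tile, so each invocation consumes a (2 - 1) * 1 + 3 = 4 row by 4 column input patch; the generic.cpp that follows clamps its valid-row and valid-column counts against that same value of 4.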
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..a8b6f18
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS) && defined(ARM_COMPUTE_ENABLE_SME)
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const __fp16 *const *const inptrs;
+    __fp16 *const *const outptrs;
+    __fp16 rescale_vals[4];
+
+    KernelArgs(
+      unsigned int channels,
+      const __fp16 *const *input_ptrs,
+      __fp16 *const * output_ptrs,
+      bool exclude_padding, unsigned int pad_left, unsigned int pad_top, unsigned int pad_right, unsigned int pad_bottom
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+      for (unsigned int i = 0; i < 2; i++)
+      {
+        const int start_i = 1*i - static_cast<int>(pad_top);
+        const int end_i = std::min<int>(start_i + 3, 4 - pad_top - pad_bottom);
+        const int valid_rows = end_i - std::max<int>(0, start_i);
+
+        for (unsigned int j = 0; j < 2; j++)
+        {
+          const int start_j = 1*j - static_cast<int>(pad_left);
+          const int end_j = std::min<int>(start_j + 3, 4 - pad_left - pad_right);
+          const int valid_cols = end_j - std::max<int>(0, start_j);
+
+          rescale_vals[i*2 + j] = static_cast<__fp16>(1.0f / static_cast<float>(
+            exclude_padding ? valid_rows * valid_cols : 9
+          ));
+        }
+      }
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x4, #0x0\n"
+    "mov x19, #0x4\n"
+    "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+    "whilelt p0.h, XZR, x19\n"
+    "add x19, %x[args], %[offsetof_rescale]\n"
+    "ld1rqh { z4.h }, p0/Z, [x19]\n"
+    "ldr x6, [%x[args], %[offsetof_n_channels]]\n"
+    "whilelt p1.h, x4, x6\n"
+    "mov x7, #0x0\n"
+    "ldp x8, x17, [x20, #0x0]\n"
+    "ldp x16, x15, [x20, #0x10]\n"
+    "ldp x14, x13, [x5, #0x0]\n"
+    "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
+    "ldp x12, x11, [x5, #0x10]\n"
+    "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
+    "ldp x10, x9, [x5, #0x20]\n"
+    "ld1h { z1.h }, p1/Z, [x9, x4, LSL #1]\n"
+    "ldp x28, x27, [x5, #0x30]\n"
+    "ld1h { z0.h }, p1/Z, [x28, x4, LSL #1]\n"
+    "ldp x26, x25, [x5, #0x40]\n"
+    "ld1h { z31.h }, p1/Z, [x25, x4, LSL #1]\n"
+    "ldp x24, x23, [x5, #0x50]\n"
+    "ld1h { z30.h }, p1/Z, [x24, x4, LSL #1]\n"
+    "ldp x22, x21, [x5, #0x60]\n"
+    "ld1h { z29.h }, p1/Z, [x10, x4, LSL #1]\n"
+    "ldp x20, x19, [x5, #0x70]\n"
+    "ld1h { z28.h }, p1/Z, [x26, x4, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x27, x4, LSL #1]\n"
+    "ld1h { z22.h }, p1/Z, [x23, x4, LSL #1]\n"
+    "ld1h { z21.h }, p1/Z, [x21, x4, LSL #1]\n"
+    "ld1h { z20.h }, p1/Z, [x20, x4, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x14, x4, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
+    "incw x4\n"
+    "whilelt p1.h, x4, x6\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "fadd z17.h, z1.h, z0.h\n"
+    "fadd z16.h, z31.h, z30.h\n"
+    "ld1h { z1.h }, p1/Z, [x9, x4, LSL #1]\n"
+    "whilelt p0.h, x7, x6\n"
+    "fadd z19.h, z17.h, z16.h\n"
+    "fadd z18.h, z3.h, z2.h\n"
+    "ld1h { z0.h }, p1/Z, [x28, x4, LSL #1]\n"
+    "fadd z17.h, z29.h, z28.h\n"
+    "fadd z22.h, z27.h, z22.h\n"
+    "ld1h { z31.h }, p1/Z, [x25, x4, LSL #1]\n"
+    "fadd z16.h, z21.h, z20.h\n"
+    "fadd z21.h, z18.h, z19.h\n"
+    "ld1h { z30.h }, p1/Z, [x24, x4, LSL #1]\n"
+    "fadd z20.h, z16.h, z19.h\n"
+    "fadd z19.h, z26.h, z17.h\n"
+    "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
+    "fadd z18.h, z25.h, z22.h\n"
+    "fadd z17.h, z24.h, z17.h\n"
+    "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
+    "fadd z16.h, z23.h, z22.h\n"
+    "fadd z19.h, z19.h, z21.h\n"
+    "ld1h { z29.h }, p1/Z, [x10, x4, LSL #1]\n"
+    "fadd z18.h, z18.h, z21.h\n"
+    "fadd z17.h, z17.h, z20.h\n"
+    "ld1h { z28.h }, p1/Z, [x26, x4, LSL #1]\n"
+    "fadd z16.h, z16.h, z20.h\n"
+    "ld1h { z27.h }, p1/Z, [x27, x4, LSL #1]\n"
+    "fmul z19.h, z19.h, z4.h[0]\n"
+    "ld1h { z22.h }, p1/Z, [x23, x4, LSL #1]\n"
+    "fmul z18.h, z18.h, z4.h[1]\n"
+    "fmul z17.h, z17.h, z4.h[2]\n"
+    "ld1h { z21.h }, p1/Z, [x21, x4, LSL #1]\n"
+    "fmul z16.h, z16.h, z4.h[3]\n"
+    "st1h { z19.h }, p0, [x8, x7, LSL #1]\n"
+    "ld1h { z20.h }, p1/Z, [x20, x4, LSL #1]\n"
+    "st1h { z18.h }, p0, [x17, x7, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x14, x4, LSL #1]\n"
+    "st1h { z17.h }, p0, [x16, x7, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+    "st1h { z16.h }, p0, [x15, x7, LSL #1]\n"
+    "incw x7\n"
+    "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
+    "incw x4\n"
+    "whilelt p1.h, x4, x6\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "fadd z17.h, z1.h, z0.h\n"
+    "fadd z16.h, z31.h, z30.h\n"
+    "whilelt p0.h, x7, x6\n"
+    "fadd z19.h, z17.h, z16.h\n"
+    "fadd z18.h, z3.h, z2.h\n"
+    "fadd z17.h, z29.h, z28.h\n"
+    "fadd z22.h, z27.h, z22.h\n"
+    "fadd z16.h, z21.h, z20.h\n"
+    "fadd z21.h, z18.h, z19.h\n"
+    "fadd z20.h, z16.h, z19.h\n"
+    "fadd z19.h, z26.h, z17.h\n"
+    "fadd z18.h, z25.h, z22.h\n"
+    "fadd z17.h, z24.h, z17.h\n"
+    "fadd z16.h, z23.h, z22.h\n"
+    "fadd z19.h, z19.h, z21.h\n"
+    "fadd z18.h, z18.h, z21.h\n"
+    "fadd z17.h, z17.h, z20.h\n"
+    "fadd z16.h, z16.h, z20.h\n"
+    "fmul z19.h, z19.h, z4.h[0]\n"
+    "st1h { z19.h }, p0, [x8, x7, LSL #1]\n"
+    "fmul z18.h, z18.h, z4.h[1]\n"
+    "fmul z17.h, z17.h, z4.h[2]\n"
+    "st1h { z18.h }, p0, [x17, x7, LSL #1]\n"
+    "fmul z16.h, z16.h, z4.h[3]\n"
+    "st1h { z17.h }, p0, [x16, x7, LSL #1]\n"
+    "st1h { z16.h }, p0, [x15, x7, LSL #1]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS) && defined(ARM_COMPUTE_ENABLE_SME)
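The rescale_vals table built in the KernelArgs constructor above gives each of the four 2x2 outputs its own averaging factor: 1/9 when all nine pool cells are inside the image, or 1/(valid_rows * valid_cols) when exclude_padding is set and the 3x3 window overhangs the border. A standalone reproduction of the same arithmetic with one worked padding configuration (a hypothetical driver, not part of the patch):

// Reproduces the rescale_vals loop above for pad_top = pad_left = 1 with
// exclude_padding enabled; expected output is 1/4, 1/6, 1/6, 1/9.
#include <algorithm>
#include <cstdio>

int main()
{
    const int pad_top = 1, pad_bottom = 0, pad_left = 1, pad_right = 0;
    float rescale_vals[4];

    for (int i = 0; i < 2; i++)
    {
        // Rows of the 3x3 window that fall inside the 4-row input patch.
        const int start_i = i - pad_top;
        const int end_i = std::min(start_i + 3, 4 - pad_top - pad_bottom);
        const int valid_rows = end_i - std::max(0, start_i);

        for (int j = 0; j < 2; j++)
        {
            const int start_j = j - pad_left;
            const int end_j = std::min(start_j + 3, 4 - pad_left - pad_right);
            const int valid_cols = end_j - std::max(0, start_j);

            rescale_vals[i * 2 + j] = 1.0f / static_cast<float>(valid_rows * valid_cols);
        }
    }

    for (float v : rescale_vals)
    {
        std::printf("%f\n", v);  // 0.250000, 0.166667, 0.166667, 0.111111
    }
    return 0;
}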
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..117eb36
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp16_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+struct sme_fp16_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<__fp16, __fp16>
+{
+  using Parent = IGenericDepthfirstStrategy<__fp16, __fp16>;
+  sme_fp16_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_fp16_nhwc_avg_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..2c1e698
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_fp16_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *outptr
+)
+{
+  const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cnth x27\n"
+    "cnth x26, ALL, MUL #2\n"
+    "cnth x25, ALL, MUL #3\n"
+    "ptrue p0.b\n"
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "ld1rh { z6.h }, p0/Z, [%x[rescale_ptr]]\n"
+    "whilelt p2.h, x27, %x[n_channels]\n"
+    "whilelt p1.h, x26, %x[n_channels]\n"
+    "whilelt p0.h, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov z4.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z3.b, #0x0\n"
+    "mov z2.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z18.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fadd z23.h, z1.h, z0.h\n"
+    "fadd z19.h, z31.h, z30.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z22.h, z29.h, z22.h\n"
+    "fadd z18.h, z28.h, z18.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "fadd z21.h, z27.h, z21.h\n"
+    "fadd z17.h, z26.h, z17.h\n"
+    "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fadd z20.h, z25.h, z20.h\n"
+    "fadd z16.h, z24.h, z16.h\n"
+    "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "fadd z18.h, z22.h, z18.h\n"
+    "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "fadd z17.h, z21.h, z17.h\n"
+    "fadd z16.h, z20.h, z16.h\n"
+    "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "fadd z5.h, z5.h, z19.h\n"
+    "fadd z4.h, z4.h, z18.h\n"
+    "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "fadd z3.h, z3.h, z17.h\n"
+    "fadd z2.h, z2.h, z16.h\n"
+    "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z18.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fadd z23.h, z1.h, z0.h\n"
+    "fadd z19.h, z31.h, z30.h\n"
+    "fadd z22.h, z29.h, z22.h\n"
+    "fadd z18.h, z28.h, z18.h\n"
+    "fadd z21.h, z27.h, z21.h\n"
+    "fadd z17.h, z26.h, z17.h\n"
+    "fadd z20.h, z25.h, z20.h\n"
+    "fadd z16.h, z24.h, z16.h\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "fadd z18.h, z22.h, z18.h\n"
+    "fadd z17.h, z21.h, z17.h\n"
+    "fadd z16.h, z20.h, z16.h\n"
+    "fadd z5.h, z5.h, z19.h\n"
+    "fadd z4.h, z4.h, z18.h\n"
+    "fadd z3.h, z3.h, z17.h\n"
+    "fadd z2.h, z2.h, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "subs x20, x20, #0x1\n"
+    "fadd z5.h, z5.h, z1.h\n"
+    "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "fadd z4.h, z4.h, z29.h\n"
+    "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "fadd z3.h, z3.h, z27.h\n"
+    "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "fadd z2.h, z2.h, z25.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "fmul z5.h, z5.h, z6.h\n"
+    "fmul z4.h, z4.h, z6.h\n"
+    "st1h { z5.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "inch x28, ALL, MUL #4\n"
+    "fmul z3.h, z3.h, z6.h\n"
+    "fmul z2.h, z2.h, z6.h\n"
+    "st1h { z4.h }, p2, [%x[outptr], x27, LSL #1]\n"
+    "inch x27, ALL, MUL #4\n"
+    "st1h { z3.h }, p1, [%x[outptr], x26, LSL #1]\n"
+    "inch x26, ALL, MUL #4\n"
+    "st1h { z2.h }, p0, [%x[outptr], x25, LSL #1]\n"
+    "inch x25, ALL, MUL #4\n"
+    "whilelt p0.h, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fadd z23.h, z1.h, z0.h\n"
+    "fadd z19.h, z31.h, z30.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd z5.h, z5.h, z19.h\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fadd z23.h, z1.h, z0.h\n"
+    "fadd z19.h, z31.h, z30.h\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "fadd z5.h, z5.h, z19.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "subs x20, x20, #0x1\n"
+    "fadd z5.h, z5.h, z1.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "fmul z5.h, z5.h, z6.h\n"
+    "st1h { z5.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "inch x28\n"
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
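The generic average-pooling kernel above accumulates n_valid_cells input cells per channel (four input pointers at a time, then a scalar-remainder loop) and finally multiplies by rescale_value = 1 / window_cells. A plain scalar equivalent with the same signature, for reference only (the function name is hypothetical; exact fp16 rounding order differs from the vector loop):

// Scalar sketch of what sme_fp16_nhwc_avg_generic_depthfirst_impl computes:
// per channel, sum the same element across every valid input cell, then
// scale by 1/window_cells. Accumulates in __fp16 to mirror the kernel.
#include <cstdint>

void avg_generic_reference(uint64_t window_cells, uint64_t n_valid_cells,
                           uint64_t n_channels, const __fp16 *const *inptrs,
                           __fp16 *outptr)
{
    const __fp16 rescale = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
    for (uint64_t c = 0; c < n_channels; c++)
    {
        __fp16 acc = 0.0f;
        for (uint64_t cell = 0; cell < n_valid_cells; cell++)
        {
            acc += inptrs[cell][c];
        }
        outptr[c] = acc * rescale;
    }
}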
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..9489c1f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<__fp16, __fp16>
+{
+  using Parent = DepthfirstStrategy<__fp16, __fp16>;
+
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
+
+  sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
+
+  Parent::KernelType get_kernel(void) const { return sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..fe2e7c8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS) && defined(ARM_COMPUTE_ENABLE_SME)
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const __fp16 *const *const inptrs;
+    __fp16 *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const __fp16 *const *input_ptrs,
+      __fp16 *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x14, #0x0\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+    "whilelt p1.h, x14, x12\n"
+    "ldp x11, x10, [x20, #0x0]\n"
+    "ldp x9, x28, [x20, #0x10]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ld1h { z29.h }, p1/Z, [x26, x14, LSL #1]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ld1h { z28.h }, p1/Z, [x24, x14, LSL #1]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ld1h { z27.h }, p1/Z, [x23, x14, LSL #1]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ld1h { z26.h }, p1/Z, [x20, x14, LSL #1]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1h { z20.h }, p1/Z, [x27, x14, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x22, x14, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x25, x14, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x21, x14, LSL #1]\n"
+    "ld1h { z19.h }, p1/Z, [x19, x14, LSL #1]\n"
+    "incw x14\n"
+    "whilelt p1.h, x14, x12\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z29\n fmax z22.h, p2/M, z22.h, z27.h\n"
+    "movprfx z21, z27\n fmax z21.h, p2/M, z21.h, z26.h\n"
+    "ld1h { z29.h }, p1/Z, [x26, x14, LSL #1]\n"
+    "whilelt p0.h, x13, x12\n"
+    "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z20.h\n"
+    "movprfx z20, z25\n fmax z20.h, p2/M, z20.h, z24.h\n"
+    "ld1h { z27.h }, p1/Z, [x23, x14, LSL #1]\n"
+    "movprfx z17, z23\n fmax z17.h, p2/M, z17.h, z28.h\n"
+    "movprfx z16, z25\n fmax z16.h, p2/M, z16.h, z19.h\n"
+    "ld1h { z26.h }, p1/Z, [x20, x14, LSL #1]\n"
+    "ld1h { z28.h }, p1/Z, [x24, x14, LSL #1]\n"
+    "movprfx z19, z18\n fmax z19.h, p2/M, z19.h, z22.h\n"
+    "movprfx z18, z22\n fmax z18.h, p2/M, z18.h, z20.h\n"
+    "ld1h { z20.h }, p1/Z, [x27, x14, LSL #1]\n"
+    "fmax z17.h, p2/M, z17.h, z21.h\n"
+    "fmax z16.h, p2/M, z16.h, z21.h\n"
+    "ld1h { z25.h }, p1/Z, [x22, x14, LSL #1]\n"
+    "st1h { z19.h }, p0, [x11, x13, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x25, x14, LSL #1]\n"
+    "st1h { z18.h }, p0, [x10, x13, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x21, x14, LSL #1]\n"
+    "st1h { z17.h }, p0, [x9, x13, LSL #1]\n"
+    "ld1h { z19.h }, p1/Z, [x19, x14, LSL #1]\n"
+    "incw x14\n"
+    "whilelt p1.h, x14, x12\n"
+    "st1h { z16.h }, p0, [x28, x13, LSL #1]\n"
+    "incw x13\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z29\n fmax z22.h, p2/M, z22.h, z27.h\n"
+    "movprfx z21, z27\n fmax z21.h, p2/M, z21.h, z26.h\n"
+    "whilelt p0.h, x13, x12\n"
+    "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z20.h\n"
+    "movprfx z20, z25\n fmax z20.h, p2/M, z20.h, z24.h\n"
+    "movprfx z17, z23\n fmax z17.h, p2/M, z17.h, z28.h\n"
+    "movprfx z16, z25\n fmax z16.h, p2/M, z16.h, z19.h\n"
+    "movprfx z19, z18\n fmax z19.h, p2/M, z19.h, z22.h\n"
+    "movprfx z18, z22\n fmax z18.h, p2/M, z18.h, z20.h\n"
+    "st1h { z19.h }, p0, [x11, x13, LSL #1]\n"
+    "fmax z17.h, p2/M, z17.h, z21.h\n"
+    "fmax z16.h, p2/M, z16.h, z21.h\n"
+    "st1h { z18.h }, p0, [x10, x13, LSL #1]\n"
+    "st1h { z17.h }, p0, [x9, x13, LSL #1]\n"
+    "st1h { z16.h }, p0, [x28, x13, LSL #1]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS) && defined(ARM_COMPUTE_ENABLE_SME)
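
Note on the kernel above: it consumes nine input pointers (a 3x3 patch of NHWC rows/columns) and writes a 2x2 output tile per pass, keeping one SVE vector of channels in flight at a time. For readers tracing the register shuffling, a minimal scalar sketch of the intended computation follows; the function name and the row-major pointer-layout assumption are illustrative only, not library API, and float stands in for the kernel's __fp16 lanes.

    #include <algorithm>
    #include <cstddef>

    // Scalar sketch of a 2x2 stride-1 max pool producing a 2x2 output tile,
    // walking channels in NHWC order.  inptrs is assumed to hold the 3x3
    // input patch row-major (9 pointers), outptrs the 2x2 outputs (4 pointers).
    void max_2x2_s1_output2x2_ref(std::size_t n_channels,
                                  const float *const *inptrs,
                                  float *const *outptrs)
    {
      for (std::size_t c = 0; c < n_channels; c++)
      {
        for (int oi = 0; oi < 2; oi++)
        {
          for (int oj = 0; oj < 2; oj++)
          {
            float v = inptrs[oi * 3 + oj][c];
            v = std::max(v, inptrs[oi * 3 + oj + 1][c]);
            v = std::max(v, inptrs[(oi + 1) * 3 + oj][c]);
            v = std::max(v, inptrs[(oi + 1) * 3 + oj + 1][c]);
            outptrs[oi * 2 + oj][c] = v;
          }
        }
      }
    }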
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..33ff1f2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp16_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+struct sme_fp16_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<__fp16, __fp16>
+{
+  using Parent = IGenericDepthfirstStrategy<__fp16, __fp16>;
+  sme_fp16_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_fp16_nhwc_max_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..1bb27e3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_fp16_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *outptr
+)
+{
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cnth x27\n"
+    "cnth x26, ALL, MUL #2\n"
+    "cnth x25, ALL, MUL #3\n"
+    "whilelt p4.h, x28, %x[n_channels]\n"
+    "whilelt p3.h, x27, %x[n_channels]\n"
+    "whilelt p2.h, x26, %x[n_channels]\n"
+    "whilelt p1.h, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.h, #0xfc00\n"
+    "mov z3.h, #0xfc00\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z2.h, #0xfc00\n"
+    "mov z1.h, #0xfc00\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z18.h }, p3/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z29.h }, p3/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z28.h }, p3/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z27.h }, p2/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z26.h }, p2/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z16.h }, p1/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
+    "fmax z23.h, p0/M, z23.h, z30.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax z18.h, p0/M, z18.h, z29.h\n"
+    "fmax z22.h, p0/M, z22.h, z28.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "fmax z17.h, p0/M, z17.h, z27.h\n"
+    "fmax z21.h, p0/M, z21.h, z26.h\n"
+    "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "fmax z16.h, p0/M, z16.h, z25.h\n"
+    "fmax z20.h, p0/M, z20.h, z24.h\n"
+    "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "fmax z18.h, p0/M, z18.h, z22.h\n"
+    "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "fmax z17.h, p0/M, z17.h, z21.h\n"
+    "fmax z16.h, p0/M, z16.h, z20.h\n"
+    "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
+    "fmax z4.h, p0/M, z4.h, z19.h\n"
+    "fmax z3.h, p0/M, z3.h, z18.h\n"
+    "ld1h { z18.h }, p3/Z, [x23, x27, LSL #1]\n"
+    "fmax z2.h, p0/M, z2.h, z17.h\n"
+    "fmax z1.h, p0/M, z1.h, z16.h\n"
+    "ld1h { z29.h }, p3/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z28.h }, p3/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z27.h }, p2/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z26.h }, p2/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z16.h }, p1/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
+    "fmax z23.h, p0/M, z23.h, z30.h\n"
+    "fmax z18.h, p0/M, z18.h, z29.h\n"
+    "fmax z22.h, p0/M, z22.h, z28.h\n"
+    "fmax z17.h, p0/M, z17.h, z27.h\n"
+    "fmax z21.h, p0/M, z21.h, z26.h\n"
+    "fmax z16.h, p0/M, z16.h, z25.h\n"
+    "fmax z20.h, p0/M, z20.h, z24.h\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "fmax z18.h, p0/M, z18.h, z22.h\n"
+    "fmax z17.h, p0/M, z17.h, z21.h\n"
+    "fmax z16.h, p0/M, z16.h, z20.h\n"
+    "fmax z4.h, p0/M, z4.h, z19.h\n"
+    "fmax z3.h, p0/M, z3.h, z18.h\n"
+    "fmax z2.h, p0/M, z2.h, z17.h\n"
+    "fmax z1.h, p0/M, z1.h, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "subs x20, x20, #0x1\n"
+    "fmax z4.h, p0/M, z4.h, z0.h\n"
+    "ld1h { z18.h }, p3/Z, [x23, x27, LSL #1]\n"
+    "fmax z3.h, p0/M, z3.h, z18.h\n"
+    "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
+    "fmax z2.h, p0/M, z2.h, z17.h\n"
+    "ld1h { z16.h }, p1/Z, [x23, x25, LSL #1]\n"
+    "fmax z1.h, p0/M, z1.h, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1h { z4.h }, p4, [%x[outptr], x28, LSL #1]\n"
+    "inch x28, ALL, MUL #4\n"
+    "st1h { z3.h }, p3, [%x[outptr], x27, LSL #1]\n"
+    "inch x27, ALL, MUL #4\n"
+    "st1h { z2.h }, p2, [%x[outptr], x26, LSL #1]\n"
+    "inch x26, ALL, MUL #4\n"
+    "st1h { z1.h }, p1, [%x[outptr], x25, LSL #1]\n"
+    "inch x25, ALL, MUL #4\n"
+    "whilelt p1.h, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.h, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.h, #0xfc00\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
+    "fmax z23.h, p0/M, z23.h, z30.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax z4.h, p0/M, z4.h, z19.h\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
+    "fmax z23.h, p0/M, z23.h, z30.h\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "fmax z4.h, p0/M, z4.h, z19.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "subs x20, x20, #0x1\n"
+    "fmax z4.h, p0/M, z4.h, z0.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1h { z4.h }, p4, [%x[outptr], x28, LSL #1]\n"
+    "inch x28\n"
+    "whilelt p4.h, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
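
The generic variant above handles an arbitrary pooling window: it folds n_valid_cells input cells into a single output row of channels, processing four vectors of channels per pass and falling back to a single-vector loop for the remainder. The accumulators are seeded with 0xfc00, the FP16 bit pattern for negative infinity. A hedged per-channel reference (illustrative name, float in place of __fp16):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Per-channel reference for the generic depth-first max pool: each output
    // channel is the maximum over the n_valid_cells input cells, starting
    // from -infinity as in the vector kernel's 0xfc00 seed.
    void max_generic_depthfirst_ref(uint64_t n_valid_cells, uint64_t n_channels,
                                    const float *const *inptrs, float *outptr)
    {
      for (uint64_t c = 0; c < n_channels; c++)
      {
        float acc = -std::numeric_limits<float>::infinity();
        for (uint64_t i = 0; i < n_valid_cells; i++)
        {
          acc = std::max(acc, inptrs[i][c]);
        }
        outptr[c] = acc;
      }
    }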
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..fa1b441
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst : public DepthfirstStrategy<float, float>
+{
+  using Parent = DepthfirstStrategy<float, float>;
+
+  const static auto pooling_type = PoolingType::AVERAGE;
+  const static auto pool_rows = 3u, pool_cols = 3u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
+
+  sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
+
+  Parent::KernelType get_kernel(void) const { return sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..602ef59
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const float *const *const inptrs,
+  float *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const float *const *const inptrs;
+    float *const *const outptrs;
+    float rescale_vals[4];
+
+    KernelArgs(
+      unsigned int channels,
+      const float *const *input_ptrs,
+      float *const * output_ptrs,
+      bool exclude_padding, unsigned int pad_left, unsigned int pad_top, unsigned int pad_right, unsigned int pad_bottom
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+      for (unsigned int i = 0; i < 2; i++)
+      {
+        const int start_i = 1*i - static_cast<int>(pad_top);
+        const int end_i = std::min<int>(start_i + 3, 4 - pad_top - pad_bottom);
+        const int valid_rows = end_i - std::max<int>(0, start_i);
+
+        for (unsigned int j = 0; j < 2; j++)
+        {
+          const int start_j = 1*j - static_cast<int>(pad_left);
+          const int end_j = std::min<int>(start_j + 3, 4 - pad_left - pad_right);
+          const int valid_cols = end_j - std::max<int>(0, start_j);
+
+          rescale_vals[i*2 + j] = static_cast<float>(1.0f / static_cast<float>(
+            exclude_padding ? valid_rows * valid_cols : 9
+          ));
+        }
+      }
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x4, #0x0\n"
+    "mov x19, #0x4\n"
+    "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+    "whilelt p0.s, XZR, x19\n"
+    "add x19, %x[args], %[offsetof_rescale]\n"
+    "ld1rqw { z4.s }, p0/Z, [x19]\n"
+    "ldr x6, [%x[args], %[offsetof_n_channels]]\n"
+    "whilelt p1.s, x4, x6\n"
+    "mov x7, #0x0\n"
+    "ldp x8, x17, [x20, #0x0]\n"
+    "ldp x16, x15, [x20, #0x10]\n"
+    "ldp x14, x13, [x5, #0x0]\n"
+    "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
+    "ldp x12, x11, [x5, #0x10]\n"
+    "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
+    "ldp x10, x9, [x5, #0x20]\n"
+    "ld1w { z1.s }, p1/Z, [x9, x4, LSL #2]\n"
+    "ldp x28, x27, [x5, #0x30]\n"
+    "ld1w { z0.s }, p1/Z, [x28, x4, LSL #2]\n"
+    "ldp x26, x25, [x5, #0x40]\n"
+    "ld1w { z31.s }, p1/Z, [x25, x4, LSL #2]\n"
+    "ldp x24, x23, [x5, #0x50]\n"
+    "ld1w { z30.s }, p1/Z, [x24, x4, LSL #2]\n"
+    "ldp x22, x21, [x5, #0x60]\n"
+    "ld1w { z29.s }, p1/Z, [x10, x4, LSL #2]\n"
+    "ldp x20, x19, [x5, #0x70]\n"
+    "ld1w { z28.s }, p1/Z, [x26, x4, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x27, x4, LSL #2]\n"
+    "ld1w { z22.s }, p1/Z, [x23, x4, LSL #2]\n"
+    "ld1w { z21.s }, p1/Z, [x21, x4, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x20, x4, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x14, x4, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
+    "incw x4\n"
+    "whilelt p1.s, x4, x6\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "fadd z17.s, z1.s, z0.s\n"
+    "fadd z16.s, z31.s, z30.s\n"
+    "ld1w { z1.s }, p1/Z, [x9, x4, LSL #2]\n"
+    "whilelt p0.s, x7, x6\n"
+    "fadd z19.s, z17.s, z16.s\n"
+    "fadd z18.s, z3.s, z2.s\n"
+    "ld1w { z0.s }, p1/Z, [x28, x4, LSL #2]\n"
+    "fadd z17.s, z29.s, z28.s\n"
+    "fadd z22.s, z27.s, z22.s\n"
+    "ld1w { z31.s }, p1/Z, [x25, x4, LSL #2]\n"
+    "fadd z16.s, z21.s, z20.s\n"
+    "fadd z21.s, z18.s, z19.s\n"
+    "ld1w { z30.s }, p1/Z, [x24, x4, LSL #2]\n"
+    "fadd z20.s, z16.s, z19.s\n"
+    "fadd z19.s, z26.s, z17.s\n"
+    "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
+    "fadd z18.s, z25.s, z22.s\n"
+    "fadd z17.s, z24.s, z17.s\n"
+    "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
+    "fadd z16.s, z23.s, z22.s\n"
+    "fadd z19.s, z19.s, z21.s\n"
+    "ld1w { z29.s }, p1/Z, [x10, x4, LSL #2]\n"
+    "fadd z18.s, z18.s, z21.s\n"
+    "fadd z17.s, z17.s, z20.s\n"
+    "ld1w { z28.s }, p1/Z, [x26, x4, LSL #2]\n"
+    "fadd z16.s, z16.s, z20.s\n"
+    "ld1w { z27.s }, p1/Z, [x27, x4, LSL #2]\n"
+    "fmul z19.s, z19.s, z4.s[0]\n"
+    "ld1w { z22.s }, p1/Z, [x23, x4, LSL #2]\n"
+    "fmul z18.s, z18.s, z4.s[1]\n"
+    "fmul z17.s, z17.s, z4.s[2]\n"
+    "ld1w { z21.s }, p1/Z, [x21, x4, LSL #2]\n"
+    "fmul z16.s, z16.s, z4.s[3]\n"
+    "st1w { z19.s }, p0, [x8, x7, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x20, x4, LSL #2]\n"
+    "st1w { z18.s }, p0, [x17, x7, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x14, x4, LSL #2]\n"
+    "st1w { z17.s }, p0, [x16, x7, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+    "st1w { z16.s }, p0, [x15, x7, LSL #2]\n"
+    "incw x7\n"
+    "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
+    "incw x4\n"
+    "whilelt p1.s, x4, x6\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "fadd z17.s, z1.s, z0.s\n"
+    "fadd z16.s, z31.s, z30.s\n"
+    "whilelt p0.s, x7, x6\n"
+    "fadd z19.s, z17.s, z16.s\n"
+    "fadd z18.s, z3.s, z2.s\n"
+    "fadd z17.s, z29.s, z28.s\n"
+    "fadd z22.s, z27.s, z22.s\n"
+    "fadd z16.s, z21.s, z20.s\n"
+    "fadd z21.s, z18.s, z19.s\n"
+    "fadd z20.s, z16.s, z19.s\n"
+    "fadd z19.s, z26.s, z17.s\n"
+    "fadd z18.s, z25.s, z22.s\n"
+    "fadd z17.s, z24.s, z17.s\n"
+    "fadd z16.s, z23.s, z22.s\n"
+    "fadd z19.s, z19.s, z21.s\n"
+    "fadd z18.s, z18.s, z21.s\n"
+    "fadd z17.s, z17.s, z20.s\n"
+    "fadd z16.s, z16.s, z20.s\n"
+    "fmul z19.s, z19.s, z4.s[0]\n"
+    "st1w { z19.s }, p0, [x8, x7, LSL #2]\n"
+    "fmul z18.s, z18.s, z4.s[1]\n"
+    "fmul z17.s, z17.s, z4.s[2]\n"
+    "st1w { z18.s }, p0, [x17, x7, LSL #2]\n"
+    "fmul z16.s, z16.s, z4.s[3]\n"
+    "st1w { z17.s }, p0, [x16, x7, LSL #2]\n"
+    "st1w { z16.s }, p0, [x15, x7, LSL #2]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
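
Unlike the max kernels, the 3x3 stride-1 average kernel above precomputes one rescale factor per output position in KernelArgs before entering the assembly, and the vector loop multiplies each of the four accumulated sums by its own factor (the fmul ... z4.s[0..3] instructions, with z4 loaded from rescale_vals). As a worked example of that constructor: with pad_top = pad_left = 1, pad_bottom = pad_right = 0 and exclude_padding set, the top-left output sees only a 2x2 valid region so its factor is 1/4, the other two corners of the tile see 2x3 regions (1/6 each), and the bottom-right sees the full 3x3 window (1/9); with exclude_padding unset all four factors are 1/9.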
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..814c89c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp32_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+struct sme_fp32_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<float, float>
+{
+  using Parent = IGenericDepthfirstStrategy<float, float>;
+  sme_fp32_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_fp32_nhwc_avg_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..08630db
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_fp32_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const float *const *const inptrs,
+  float *outptr
+)
+{
+  const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cntw x27\n"
+    "cntw x26, ALL, MUL #2\n"
+    "cntw x25, ALL, MUL #3\n"
+    "ptrue p0.b\n"
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "ld1rw { z6.s }, p0/Z, [%x[rescale_ptr]]\n"
+    "whilelt p2.s, x27, %x[n_channels]\n"
+    "whilelt p1.s, x26, %x[n_channels]\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov z4.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z3.b, #0x0\n"
+    "mov z2.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z18.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fadd z23.s, z1.s, z0.s\n"
+    "fadd z19.s, z31.s, z30.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z22.s, z29.s, z22.s\n"
+    "fadd z18.s, z28.s, z18.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "fadd z21.s, z27.s, z21.s\n"
+    "fadd z17.s, z26.s, z17.s\n"
+    "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fadd z20.s, z25.s, z20.s\n"
+    "fadd z16.s, z24.s, z16.s\n"
+    "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "fadd z18.s, z22.s, z18.s\n"
+    "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "fadd z17.s, z21.s, z17.s\n"
+    "fadd z16.s, z20.s, z16.s\n"
+    "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "fadd z5.s, z5.s, z19.s\n"
+    "fadd z4.s, z4.s, z18.s\n"
+    "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "fadd z3.s, z3.s, z17.s\n"
+    "fadd z2.s, z2.s, z16.s\n"
+    "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z18.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fadd z23.s, z1.s, z0.s\n"
+    "fadd z19.s, z31.s, z30.s\n"
+    "fadd z22.s, z29.s, z22.s\n"
+    "fadd z18.s, z28.s, z18.s\n"
+    "fadd z21.s, z27.s, z21.s\n"
+    "fadd z17.s, z26.s, z17.s\n"
+    "fadd z20.s, z25.s, z20.s\n"
+    "fadd z16.s, z24.s, z16.s\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "fadd z18.s, z22.s, z18.s\n"
+    "fadd z17.s, z21.s, z17.s\n"
+    "fadd z16.s, z20.s, z16.s\n"
+    "fadd z5.s, z5.s, z19.s\n"
+    "fadd z4.s, z4.s, z18.s\n"
+    "fadd z3.s, z3.s, z17.s\n"
+    "fadd z2.s, z2.s, z16.s\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "subs x20, x20, #0x1\n"
+    "fadd z5.s, z5.s, z1.s\n"
+    "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "fadd z4.s, z4.s, z29.s\n"
+    "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "fadd z3.s, z3.s, z27.s\n"
+    "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "fadd z2.s, z2.s, z25.s\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "fmul z5.s, z5.s, z6.s\n"
+    "fmul z4.s, z4.s, z6.s\n"
+    "st1w { z5.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "incw x28, ALL, MUL #4\n"
+    "fmul z3.s, z3.s, z6.s\n"
+    "fmul z2.s, z2.s, z6.s\n"
+    "st1w { z4.s }, p2, [%x[outptr], x27, LSL #2]\n"
+    "incw x27, ALL, MUL #4\n"
+    "st1w { z3.s }, p1, [%x[outptr], x26, LSL #2]\n"
+    "incw x26, ALL, MUL #4\n"
+    "st1w { z2.s }, p0, [%x[outptr], x25, LSL #2]\n"
+    "incw x25, ALL, MUL #4\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fadd z23.s, z1.s, z0.s\n"
+    "fadd z19.s, z31.s, z30.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd z5.s, z5.s, z19.s\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fadd z23.s, z1.s, z0.s\n"
+    "fadd z19.s, z31.s, z30.s\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "fadd z5.s, z5.s, z19.s\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "subs x20, x20, #0x1\n"
+    "fadd z5.s, z5.s, z1.s\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "fmul z5.s, z5.s, z6.s\n"
+    "st1w { z5.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "incw x28\n"
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
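
For the generic average pool above, the C++ wrapper computes rescale_value = 1 / window_cells and the assembly broadcasts it into z6 (ld1rw) before scaling the per-channel sums. A minimal scalar sketch of the same computation, with an illustrative function name:

    #include <cstdint>

    // Per-channel reference for the generic depth-first average pool: the sum
    // over the valid cells is scaled by 1 / window_cells, matching the
    // rescale_value broadcast into z6 by the kernel above.
    void avg_generic_depthfirst_ref(uint64_t window_cells, uint64_t n_valid_cells,
                                    uint64_t n_channels,
                                    const float *const *inptrs, float *outptr)
    {
      const float rescale = 1.0f / static_cast<float>(window_cells);
      for (uint64_t c = 0; c < n_channels; c++)
      {
        float acc = 0.0f;
        for (uint64_t i = 0; i < n_valid_cells; i++)
        {
          acc += inptrs[i][c];
        }
        outptr[c] = acc * rescale;
      }
    }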
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..4e3cd6e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<float, float>
+{
+  using Parent = DepthfirstStrategy<float, float>;
+
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
+
+  sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
+
+  Parent::KernelType get_kernel(void) const { return sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
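
These headers wire each assembly kernel into the pooling framework through a small strategy class: the fixed-window kernels derive from DepthfirstStrategy and declare their pool shape, stride and output tile, while the generic kernels derive from IGenericDepthfirstStrategy and only forward the kernel pointer. A stripped-down sketch of the idea, with hypothetical names rather than the library's actual interfaces:

    // Simplified illustration of the strategy pattern used by these headers:
    // a strategy couples a pool geometry with a raw kernel entry point so the
    // planner can select a kernel and query its window, stride and output tile.
    using KernelFn = void (*)(unsigned int, const float *const *, float *const *,
                              bool, unsigned int, unsigned int, unsigned int,
                              unsigned int);

    struct ExamplePoolingStrategy
    {
      unsigned int pool_rows, pool_cols;
      unsigned int stride_rows, stride_cols;
      unsigned int out_rows, out_cols;
      KernelFn kernel;

      KernelFn get_kernel() const { return kernel; }
    };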
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..be254d3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const float *const *const inptrs,
+  float *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const float *const *const inptrs;
+    float *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const float *const *input_ptrs,
+      float *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x14, #0x0\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+    "whilelt p1.s, x14, x12\n"
+    "ldp x11, x10, [x20, #0x0]\n"
+    "ldp x9, x28, [x20, #0x10]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ld1w { z29.s }, p1/Z, [x26, x14, LSL #2]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ld1w { z28.s }, p1/Z, [x24, x14, LSL #2]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ld1w { z27.s }, p1/Z, [x23, x14, LSL #2]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ld1w { z26.s }, p1/Z, [x20, x14, LSL #2]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1w { z20.s }, p1/Z, [x27, x14, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x22, x14, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x25, x14, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x21, x14, LSL #2]\n"
+    "ld1w { z19.s }, p1/Z, [x19, x14, LSL #2]\n"
+    "incw x14\n"
+    "whilelt p1.s, x14, x12\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z29\n fmax z22.s, p2/M, z22.s, z27.s\n"
+    "movprfx z21, z27\n fmax z21.s, p2/M, z21.s, z26.s\n"
+    "ld1w { z29.s }, p1/Z, [x26, x14, LSL #2]\n"
+    "whilelt p0.s, x13, x12\n"
+    "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z20.s\n"
+    "movprfx z20, z25\n fmax z20.s, p2/M, z20.s, z24.s\n"
+    "ld1w { z27.s }, p1/Z, [x23, x14, LSL #2]\n"
+    "movprfx z17, z23\n fmax z17.s, p2/M, z17.s, z28.s\n"
+    "movprfx z16, z25\n fmax z16.s, p2/M, z16.s, z19.s\n"
+    "ld1w { z26.s }, p1/Z, [x20, x14, LSL #2]\n"
+    "ld1w { z28.s }, p1/Z, [x24, x14, LSL #2]\n"
+    "movprfx z19, z18\n fmax z19.s, p2/M, z19.s, z22.s\n"
+    "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z20.s\n"
+    "ld1w { z20.s }, p1/Z, [x27, x14, LSL #2]\n"
+    "fmax z17.s, p2/M, z17.s, z21.s\n"
+    "fmax z16.s, p2/M, z16.s, z21.s\n"
+    "ld1w { z25.s }, p1/Z, [x22, x14, LSL #2]\n"
+    "st1w { z19.s }, p0, [x11, x13, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x25, x14, LSL #2]\n"
+    "st1w { z18.s }, p0, [x10, x13, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x21, x14, LSL #2]\n"
+    "st1w { z17.s }, p0, [x9, x13, LSL #2]\n"
+    "ld1w { z19.s }, p1/Z, [x19, x14, LSL #2]\n"
+    "incw x14\n"
+    "whilelt p1.s, x14, x12\n"
+    "st1w { z16.s }, p0, [x28, x13, LSL #2]\n"
+    "incw x13\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z29\n fmax z22.s, p2/M, z22.s, z27.s\n"
+    "movprfx z21, z27\n fmax z21.s, p2/M, z21.s, z26.s\n"
+    "whilelt p0.s, x13, x12\n"
+    "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z20.s\n"
+    "movprfx z20, z25\n fmax z20.s, p2/M, z20.s, z24.s\n"
+    "movprfx z17, z23\n fmax z17.s, p2/M, z17.s, z28.s\n"
+    "movprfx z16, z25\n fmax z16.s, p2/M, z16.s, z19.s\n"
+    "movprfx z19, z18\n fmax z19.s, p2/M, z19.s, z22.s\n"
+    "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z20.s\n"
+    "st1w { z19.s }, p0, [x11, x13, LSL #2]\n"
+    "fmax z17.s, p2/M, z17.s, z21.s\n"
+    "fmax z16.s, p2/M, z16.s, z21.s\n"
+    "st1w { z18.s }, p0, [x10, x13, LSL #2]\n"
+    "st1w { z17.s }, p0, [x9, x13, LSL #2]\n"
+    "st1w { z16.s }, p0, [x28, x13, LSL #2]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..0c0e445
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_fp32_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+struct sme_fp32_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<float, float>
+{
+  using Parent = IGenericDepthfirstStrategy<float, float>;
+  sme_fp32_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_fp32_nhwc_max_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..b9f90ea
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_fp32_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const float *const *const inptrs,
+  float *outptr
+)
+{
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cntw x27\n"
+    "cntw x26, ALL, MUL #2\n"
+    "cntw x25, ALL, MUL #3\n"
+    "whilelt p4.s, x28, %x[n_channels]\n"
+    "whilelt p3.s, x27, %x[n_channels]\n"
+    "whilelt p2.s, x26, %x[n_channels]\n"
+    "whilelt p1.s, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.s, #0xff800000\n"
+    "mov z3.s, #0xff800000\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z2.s, #0xff800000\n"
+    "mov z1.s, #0xff800000\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z18.s }, p3/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z29.s }, p3/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z28.s }, p3/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z27.s }, p2/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z26.s }, p2/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z16.s }, p1/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
+    "fmax z23.s, p0/M, z23.s, z30.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax z18.s, p0/M, z18.s, z29.s\n"
+    "fmax z22.s, p0/M, z22.s, z28.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "fmax z17.s, p0/M, z17.s, z27.s\n"
+    "fmax z21.s, p0/M, z21.s, z26.s\n"
+    "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "fmax z16.s, p0/M, z16.s, z25.s\n"
+    "fmax z20.s, p0/M, z20.s, z24.s\n"
+    "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "fmax z18.s, p0/M, z18.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "fmax z17.s, p0/M, z17.s, z21.s\n"
+    "fmax z16.s, p0/M, z16.s, z20.s\n"
+    "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
+    "fmax z4.s, p0/M, z4.s, z19.s\n"
+    "fmax z3.s, p0/M, z3.s, z18.s\n"
+    "ld1w { z18.s }, p3/Z, [x23, x27, LSL #2]\n"
+    "fmax z2.s, p0/M, z2.s, z17.s\n"
+    "fmax z1.s, p0/M, z1.s, z16.s\n"
+    "ld1w { z29.s }, p3/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z28.s }, p3/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z27.s }, p2/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z26.s }, p2/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z16.s }, p1/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
+    "fmax z23.s, p0/M, z23.s, z30.s\n"
+    "fmax z18.s, p0/M, z18.s, z29.s\n"
+    "fmax z22.s, p0/M, z22.s, z28.s\n"
+    "fmax z17.s, p0/M, z17.s, z27.s\n"
+    "fmax z21.s, p0/M, z21.s, z26.s\n"
+    "fmax z16.s, p0/M, z16.s, z25.s\n"
+    "fmax z20.s, p0/M, z20.s, z24.s\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "fmax z18.s, p0/M, z18.s, z22.s\n"
+    "fmax z17.s, p0/M, z17.s, z21.s\n"
+    "fmax z16.s, p0/M, z16.s, z20.s\n"
+    "fmax z4.s, p0/M, z4.s, z19.s\n"
+    "fmax z3.s, p0/M, z3.s, z18.s\n"
+    "fmax z2.s, p0/M, z2.s, z17.s\n"
+    "fmax z1.s, p0/M, z1.s, z16.s\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "subs x20, x20, #0x1\n"
+    "fmax z4.s, p0/M, z4.s, z0.s\n"
+    "ld1w { z18.s }, p3/Z, [x23, x27, LSL #2]\n"
+    "fmax z3.s, p0/M, z3.s, z18.s\n"
+    "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
+    "fmax z2.s, p0/M, z2.s, z17.s\n"
+    "ld1w { z16.s }, p1/Z, [x23, x25, LSL #2]\n"
+    "fmax z1.s, p0/M, z1.s, z16.s\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1w { z4.s }, p4, [%x[outptr], x28, LSL #2]\n"
+    "incw x28, ALL, MUL #4\n"
+    "st1w { z3.s }, p3, [%x[outptr], x27, LSL #2]\n"
+    "incw x27, ALL, MUL #4\n"
+    "st1w { z2.s }, p2, [%x[outptr], x26, LSL #2]\n"
+    "incw x26, ALL, MUL #4\n"
+    "st1w { z1.s }, p1, [%x[outptr], x25, LSL #2]\n"
+    "incw x25, ALL, MUL #4\n"
+    "whilelt p1.s, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.s, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.s, #0xff800000\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
+    "fmax z23.s, p0/M, z23.s, z30.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax z4.s, p0/M, z4.s, z19.s\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
+    "fmax z23.s, p0/M, z23.s, z30.s\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "fmax z4.s, p0/M, z4.s, z19.s\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "subs x20, x20, #0x1\n"
+    "fmax z4.s, p0/M, z4.s, z0.s\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1w { z4.s }, p4, [%x[outptr], x28, LSL #2]\n"
+    "incw x28\n"
+    "whilelt p4.s, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..e383a4c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_s8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+struct sme_s8_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t>
+{
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t>;
+  sme_s8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_s8_nhwc_avg_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..c5066d1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555556, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555556, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sme_s8_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "mov z11.s, #0x0\n"
+    "mov z10.s, #0x0\n"
+    "mov z9.s, #0x0\n"
+    "mov z8.s, #0x0\n"
+    "mov z7.s, #0x0\n"
+    "mov z6.s, #0x0\n"
+    "mov z5.s, #0x0\n"
+    "mov z4.s, #0x0\n"
+    "mov z3.s, #0x0\n"
+    "mov z2.s, #0x0\n"
+    "mov z1.s, #0x0\n"
+    "mov z0.s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x4508a3b5  // sshllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508a7b4  // sshllt z20.h, z29.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508a373  // sshllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508a772  // sshllt z18.h, z27.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508a331  // sshllb z17.h, z25.b, #0x0\n"
+    ".inst 0x4508a730  // sshllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
+    ".inst 0x04b1756b  // sqdmulh z11.s, z11.s, z17.s\n"
+    ".inst 0x04b1754a  // sqdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x04b17529  // sqdmulh z9.s, z9.s, z17.s\n"
+    ".inst 0x04b17508  // sqdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x04b174e7  // sqdmulh z7.s, z7.s, z17.s\n"
+    ".inst 0x04b174c6  // sqdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x04b174a5  // sqdmulh z5.s, z5.s, z17.s\n"
+    ".inst 0x04b17484  // sqdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x04b17463  // sqdmulh z3.s, z3.s, z17.s\n"
+    ".inst 0x04b17442  // sqdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x04b17421  // sqdmulh z1.s, z1.s, z17.s\n"
+    ".inst 0x04b17400  // sqdmulh z0.s, z0.s, z17.s\n"
+    "mov z19.s, #0x7f\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    ".inst 0x4482820b  // srshl z11.s, p0/M, z11.s, z16.s\n"
+    ".inst 0x4482820a  // srshl z10.s, p0/M, z10.s, z16.s\n"
+    ".inst 0x44828209  // srshl z9.s, p0/M, z9.s, z16.s\n"
+    ".inst 0x44828208  // srshl z8.s, p0/M, z8.s, z16.s\n"
+    ".inst 0x44828207  // srshl z7.s, p0/M, z7.s, z16.s\n"
+    ".inst 0x44828206  // srshl z6.s, p0/M, z6.s, z16.s\n"
+    ".inst 0x44828205  // srshl z5.s, p0/M, z5.s, z16.s\n"
+    ".inst 0x44828204  // srshl z4.s, p0/M, z4.s, z16.s\n"
+    ".inst 0x44828203  // srshl z3.s, p0/M, z3.s, z16.s\n"
+    ".inst 0x44828202  // srshl z2.s, p0/M, z2.s, z16.s\n"
+    ".inst 0x44828201  // srshl z1.s, p0/M, z1.s, z16.s\n"
+    ".inst 0x44828200  // srshl z0.s, p0/M, z0.s, z16.s\n"
+    "not z16.s, p0/M, z19.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smin z11.s, p0/M, z11.s, z19.s\n"
+    "smin z10.s, p0/M, z10.s, z19.s\n"
+    "trn1 z22.h, z11.h, z10.h\n"
+    "smin z9.s, p0/M, z9.s, z19.s\n"
+    "smin z8.s, p0/M, z8.s, z19.s\n"
+    "trn1 z18.h, z9.h, z8.h\n"
+    "smin z7.s, p0/M, z7.s, z19.s\n"
+    "smin z6.s, p0/M, z6.s, z19.s\n"
+    "trn1 z21.h, z7.h, z6.h\n"
+    "smin z5.s, p0/M, z5.s, z19.s\n"
+    "smin z4.s, p0/M, z4.s, z19.s\n"
+    "trn1 z17.h, z5.h, z4.h\n"
+    "smin z3.s, p0/M, z3.s, z19.s\n"
+    "smin z2.s, p0/M, z2.s, z19.s\n"
+    "trn1 z20.h, z3.h, z2.h\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "trn1 z19.h, z1.h, z0.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "trn1 z18.b, z22.b, z18.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "trn1 z17.b, z21.b, z17.b\n"
+    "trn1 z16.b, z20.b, z19.b\n"
+    "st1b { z18.b }, p3, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "st1b { z17.b }, p2, [%x[outptr], x24]\n"
+    "incb x24, ALL, MUL #4\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
+    "mov z19.s, #0x7f\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    "not z16.s, p0/M, z19.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
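[Editor's note] The int8 average-pooling kernel above accumulates 8-bit inputs into 32-bit sums (saddlb/saddlt widened further by saddwb/saddwt), multiplies by the fixed-point reciprocal of the window size with SQDMULH, applies the rounding shift with SRSHL, and clamps to [-128, 127] before narrowing. A scalar sketch of that requantisation path, reusing the rescale_value/shift_value computed in the C++ prologue above; the helper names are hypothetical and the sketch ignores the vectorised data layout:

    #include <algorithm>
    #include <cstdint>

    // Hypothetical scalar equivalents of SQDMULH and SRSHL (non-positive shift), for illustration.
    static int32_t sqdmulh_s32(int32_t a, int32_t b)
    {
        if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;  // saturating corner case
        return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 31);
    }

    static int32_t rounding_shift_s32(int32_t v, int32_t shift)
    {
        if (shift >= 0) return v << shift;
        const int32_t s = -shift;
        return static_cast<int32_t>((static_cast<int64_t>(v) + (int64_t{1} << (s - 1))) >> s);
    }

    // Scalar model of the kernel body: sum, rescale, shift, clamp, narrow.
    void s8_avg_pool_reference(uint64_t n_valid_cells, uint64_t n_channels,
                               const int8_t *const *inptrs, int8_t *outptr,
                               int32_t rescale_value, int32_t shift_value)
    {
        for (uint64_t c = 0; c < n_channels; c++)
        {
            int32_t acc = 0;
            for (uint64_t i = 0; i < n_valid_cells; i++) acc += inptrs[i][c];
            acc = sqdmulh_s32(acc, rescale_value);       // multiply by ~1/window_cells in Q31
            acc = rounding_shift_s32(acc, shift_value);  // shift_value is <= 0 here
            outptr[c] = static_cast<int8_t>(std::min(std::max(acc, -128), 127));
        }
    }
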
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..1613970
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<int8_t, int8_t>
+{
+  using Parent = DepthfirstStrategy<int8_t, int8_t>;
+
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
+
+  sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
+
+  Parent::KernelType get_kernel(void) const { return sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..d25bec0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const int8_t *const *const inptrs;
+    int8_t *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const int8_t *const *input_ptrs,
+      int8_t *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x14, #0x0\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+    "whilelt p1.b, x14, x12\n"
+    "ldp x11, x10, [x20, #0x0]\n"
+    "ldp x9, x28, [x20, #0x10]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1b { z20.b }, p1/Z, [x27, x14]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+    "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+    "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+    "ld1b { z19.b }, p1/Z, [x19, x14]\n"
+    "incw x14\n"
+    "whilelt p1.b, x14, x12\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z29\n smax z22.b, p2/M, z22.b, z27.b\n"
+    "movprfx z21, z27\n smax z21.b, p2/M, z21.b, z26.b\n"
+    "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+    "whilelt p0.b, x13, x12\n"
+    "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z20.b\n"
+    "movprfx z20, z25\n smax z20.b, p2/M, z20.b, z24.b\n"
+    "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+    "movprfx z17, z23\n smax z17.b, p2/M, z17.b, z28.b\n"
+    "movprfx z16, z25\n smax z16.b, p2/M, z16.b, z19.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+    "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+    "movprfx z19, z18\n smax z19.b, p2/M, z19.b, z22.b\n"
+    "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z20.b\n"
+    "ld1b { z20.b }, p1/Z, [x27, x14]\n"
+    "smax z17.b, p2/M, z17.b, z21.b\n"
+    "smax z16.b, p2/M, z16.b, z21.b\n"
+    "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+    "st1b { z19.b }, p0, [x11, x13]\n"
+    "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+    "st1b { z18.b }, p0, [x10, x13]\n"
+    "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+    "st1b { z17.b }, p0, [x9, x13]\n"
+    "ld1b { z19.b }, p1/Z, [x19, x14]\n"
+    "incw x14\n"
+    "whilelt p1.b, x14, x12\n"
+    "st1b { z16.b }, p0, [x28, x13]\n"
+    "incw x13\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z29\n smax z22.b, p2/M, z22.b, z27.b\n"
+    "movprfx z21, z27\n smax z21.b, p2/M, z21.b, z26.b\n"
+    "whilelt p0.b, x13, x12\n"
+    "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z20.b\n"
+    "movprfx z20, z25\n smax z20.b, p2/M, z20.b, z24.b\n"
+    "movprfx z17, z23\n smax z17.b, p2/M, z17.b, z28.b\n"
+    "movprfx z16, z25\n smax z16.b, p2/M, z16.b, z19.b\n"
+    "movprfx z19, z18\n smax z19.b, p2/M, z19.b, z22.b\n"
+    "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z20.b\n"
+    "st1b { z19.b }, p0, [x11, x13]\n"
+    "smax z17.b, p2/M, z17.b, z21.b\n"
+    "smax z16.b, p2/M, z16.b, z21.b\n"
+    "st1b { z18.b }, p0, [x10, x13]\n"
+    "st1b { z17.b }, p0, [x9, x13]\n"
+    "st1b { z16.b }, p0, [x28, x13]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
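[Editor's note] The 2x2/stride-1 specialisation above consumes a 3x3 patch of input row pointers (nine loads per iteration) and produces a 2x2 block of outputs, where each output channel vector is the maximum over its 2x2 input neighbourhood. A scalar sketch, assuming inptrs holds the nine patch positions in row-major order and outptrs the four outputs likewise (the actual ordering is fixed by the surrounding depthfirst framework, so this is illustrative only):

    #include <algorithm>
    #include <cstdint>

    // Hypothetical scalar model of 2x2 max pooling with stride 1 over a 3x3 input patch.
    void s8_max_2x2_s1_reference(unsigned int n_channels,
                                 const int8_t *const *inptrs,  // assumed: 9 pointers, row-major 3x3 patch
                                 int8_t *const *outptrs)       // assumed: 4 pointers, row-major 2x2 block
    {
        for (unsigned int c = 0; c < n_channels; c++)
        {
            for (unsigned int out_i = 0; out_i < 2; out_i++)
            {
                for (unsigned int out_j = 0; out_j < 2; out_j++)
                {
                    int8_t acc = INT8_MIN;
                    for (unsigned int i = 0; i < 2; i++)
                    {
                        for (unsigned int j = 0; j < 2; j++)
                        {
                            acc = std::max(acc, inptrs[(out_i + i) * 3 + (out_j + j)][c]);
                        }
                    }
                    outptrs[out_i * 2 + out_j][c] = acc;
                }
            }
        }
    }
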
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..56aa120
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_s8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+struct sme_s8_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t>
+{
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t>;
+  sme_s8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_s8_nhwc_max_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..86ad4fe
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_s8_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr
+)
+{
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x80\n"
+    "mov z3.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z2.b, #0x80\n"
+    "mov z1.b, #0x80\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax z18.b, p0/M, z18.b, z29.b\n"
+    "smax z22.b, p0/M, z22.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "smax z17.b, p0/M, z17.b, z27.b\n"
+    "smax z21.b, p0/M, z21.b, z26.b\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "smax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "smax z2.b, p0/M, z2.b, z17.b\n"
+    "smax z1.b, p0/M, z1.b, z16.b\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "smax z18.b, p0/M, z18.b, z29.b\n"
+    "smax z22.b, p0/M, z22.b, z28.b\n"
+    "smax z17.b, p0/M, z17.b, z27.b\n"
+    "smax z21.b, p0/M, z21.b, z26.b\n"
+    "smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "smax z3.b, p0/M, z3.b, z18.b\n"
+    "smax z2.b, p0/M, z2.b, z17.b\n"
+    "smax z1.b, p0/M, z1.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "smax z4.b, p0/M, z4.b, z0.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "smax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "smax z2.b, p0/M, z2.b, z17.b\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "smax z1.b, p0/M, z1.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1b { z4.b }, p4, [%x[outptr], x28]\n"
+    "incb x28, ALL, MUL #4\n"
+    "st1b { z3.b }, p3, [%x[outptr], x27]\n"
+    "incb x27, ALL, MUL #4\n"
+    "st1b { z2.b }, p2, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "st1b { z1.b }, p1, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "smax z4.b, p0/M, z4.b, z0.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1b { z4.b }, p4, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..ee02c60
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_s8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
+
+struct sme_s8q_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>
+{
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>;
+  sme_s8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_s8q_nhwc_avg_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..28b7426
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include "src/core/NEON/kernels/assembly/pooling.hpp"
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555556, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555556, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sme_s8q_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  // Combine together the rescale value for the requantization and the scaling
+  // factor for the average pool.
+  const int32_t shift = qp.per_layer_left_shift - qp.per_layer_right_shift + shift_value;
+  const int32_t left_shift = shift > 0 ? shift : 0;
+  const int32_t right_shift = shift <= 0 ? shift : 0;
+
+  int32_t combined_rescale_value = 0;
+  __asm__ __volatile__ (
+      "mov v16.s[0], %w[per_layer_mul]\n"
+      "mov v17.s[0], %w[rescale_value]\n"
+      "sqrdmulh s18, s16, s17\n"
+      "mov %w[combined_rescale_value], v18.s[0]\n"
+    : [combined_rescale_value] "=r" (combined_rescale_value)
+    : [per_layer_mul] "r" (qp.per_layer_mul), [rescale_value] "r" (rescale_value)
+    : "v16", "v17", "v18"
+  );
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "mov z11.s, #0x0\n"
+    "mov z10.s, #0x0\n"
+    "mov z9.s, #0x0\n"
+    "mov z8.s, #0x0\n"
+    "mov z7.s, #0x0\n"
+    "mov z6.s, #0x0\n"
+    "mov z5.s, #0x0\n"
+    "mov z4.s, #0x0\n"
+    "mov z3.s, #0x0\n"
+    "mov z2.s, #0x0\n"
+    "mov z1.s, #0x0\n"
+    "mov z0.s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x4508a3b5  // sshllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508a7b4  // sshllt z20.h, z29.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508a373  // sshllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508a772  // sshllt z18.h, z27.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508a331  // sshllb z17.h, z25.b, #0x0\n"
+    ".inst 0x4508a730  // sshllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
+    ".inst 0x4482824f  // srshl z15.s, p0/M, z15.s, z18.s\n"
+    ".inst 0x4482824e  // srshl z14.s, p0/M, z14.s, z18.s\n"
+    ".inst 0x4482824d  // srshl z13.s, p0/M, z13.s, z18.s\n"
+    ".inst 0x4482824c  // srshl z12.s, p0/M, z12.s, z18.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x4482824b  // srshl z11.s, p0/M, z11.s, z18.s\n"
+    ".inst 0x4482824a  // srshl z10.s, p0/M, z10.s, z18.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x44828249  // srshl z9.s, p0/M, z9.s, z18.s\n"
+    ".inst 0x44828248  // srshl z8.s, p0/M, z8.s, z18.s\n"
+    ".inst 0x44828247  // srshl z7.s, p0/M, z7.s, z18.s\n"
+    ".inst 0x44828246  // srshl z6.s, p0/M, z6.s, z18.s\n"
+    ".inst 0x44828245  // srshl z5.s, p0/M, z5.s, z18.s\n"
+    ".inst 0x44828244  // srshl z4.s, p0/M, z4.s, z18.s\n"
+    ".inst 0x44828243  // srshl z3.s, p0/M, z3.s, z18.s\n"
+    ".inst 0x44828242  // srshl z2.s, p0/M, z2.s, z18.s\n"
+    ".inst 0x44828241  // srshl z1.s, p0/M, z1.s, z18.s\n"
+    ".inst 0x44828240  // srshl z0.s, p0/M, z0.s, z18.s\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x04b1756b  // sqrdmulh z11.s, z11.s, z17.s\n"
+    ".inst 0x04b1754a  // sqrdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x04b17529  // sqrdmulh z9.s, z9.s, z17.s\n"
+    ".inst 0x04b17508  // sqrdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x04b174e7  // sqrdmulh z7.s, z7.s, z17.s\n"
+    ".inst 0x04b174c6  // sqrdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x04b174a5  // sqrdmulh z5.s, z5.s, z17.s\n"
+    ".inst 0x04b17484  // sqrdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x04b17463  // sqrdmulh z3.s, z3.s, z17.s\n"
+    ".inst 0x04b17442  // sqrdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x04b17421  // sqrdmulh z1.s, z1.s, z17.s\n"
+    ".inst 0x04b17400  // sqrdmulh z0.s, z0.s, z17.s\n"
+    "mov z19.s, #0x7f\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    ".inst 0x4482820b  // srshl z11.s, p0/M, z11.s, z16.s\n"
+    ".inst 0x4482820a  // srshl z10.s, p0/M, z10.s, z16.s\n"
+    ".inst 0x44828209  // srshl z9.s, p0/M, z9.s, z16.s\n"
+    ".inst 0x44828208  // srshl z8.s, p0/M, z8.s, z16.s\n"
+    ".inst 0x44828207  // srshl z7.s, p0/M, z7.s, z16.s\n"
+    ".inst 0x44828206  // srshl z6.s, p0/M, z6.s, z16.s\n"
+    ".inst 0x44828205  // srshl z5.s, p0/M, z5.s, z16.s\n"
+    ".inst 0x44828204  // srshl z4.s, p0/M, z4.s, z16.s\n"
+    ".inst 0x44828203  // srshl z3.s, p0/M, z3.s, z16.s\n"
+    ".inst 0x44828202  // srshl z2.s, p0/M, z2.s, z16.s\n"
+    ".inst 0x44828201  // srshl z1.s, p0/M, z1.s, z16.s\n"
+    ".inst 0x44828200  // srshl z0.s, p0/M, z0.s, z16.s\n"
+    "not z16.s, p0/M, z19.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smin z11.s, p0/M, z11.s, z19.s\n"
+    "smin z10.s, p0/M, z10.s, z19.s\n"
+    "trn1 z22.h, z11.h, z10.h\n"
+    "smin z9.s, p0/M, z9.s, z19.s\n"
+    "smin z8.s, p0/M, z8.s, z19.s\n"
+    "trn1 z18.h, z9.h, z8.h\n"
+    "smin z7.s, p0/M, z7.s, z19.s\n"
+    "smin z6.s, p0/M, z6.s, z19.s\n"
+    "trn1 z21.h, z7.h, z6.h\n"
+    "smin z5.s, p0/M, z5.s, z19.s\n"
+    "smin z4.s, p0/M, z4.s, z19.s\n"
+    "trn1 z17.h, z5.h, z4.h\n"
+    "smin z3.s, p0/M, z3.s, z19.s\n"
+    "smin z2.s, p0/M, z2.s, z19.s\n"
+    "trn1 z20.h, z3.h, z2.h\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "trn1 z19.h, z1.h, z0.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "trn1 z18.b, z22.b, z18.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "trn1 z17.b, z21.b, z17.b\n"
+    "trn1 z16.b, z20.b, z19.b\n"
+    "st1b { z18.b }, p3, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "st1b { z17.b }, p2, [%x[outptr], x24]\n"
+    "incb x24, ALL, MUL #4\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
+    ".inst 0x4482824f  // srshl z15.s, p0/M, z15.s, z18.s\n"
+    ".inst 0x4482824e  // srshl z14.s, p0/M, z14.s, z18.s\n"
+    ".inst 0x4482824d  // srshl z13.s, p0/M, z13.s, z18.s\n"
+    ".inst 0x4482824c  // srshl z12.s, p0/M, z12.s, z18.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    "mov z19.s, #0x7f\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    "not z16.s, p0/M, z19.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [right_shift] "r" (&right_shift)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..050aff3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_s8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
+
+struct sme_s8q_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>
+{
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>;
+  sme_s8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_s8q_nhwc_max_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..3d13991
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include "src/core/NEON/kernels/assembly/pooling.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_s8q_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr,
+  const Requantize32 &qp
+)
+{
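+  // The assembly below max-reduces up to four SVE vectors of int8 channels
+  // across the n_valid_cells inputs (accumulators start at -128), then
+  // requantizes each result: widen to 32 bits, rounding shift by
+  // per_layer_left_shift, SQRDMULH against per_layer_mul, rounding shift by
+  // per_layer_right_shift, clamp to [-128, 127], and narrow back to bytes
+  // for the store.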
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x80\n"
+    "mov z3.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z2.b, #0x80\n"
+    "mov z1.b, #0x80\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax z18.b, p0/M, z18.b, z29.b\n"
+    "smax z22.b, p0/M, z22.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "smax z17.b, p0/M, z17.b, z27.b\n"
+    "smax z21.b, p0/M, z21.b, z26.b\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "smax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "smax z2.b, p0/M, z2.b, z17.b\n"
+    "smax z1.b, p0/M, z1.b, z16.b\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "smax z18.b, p0/M, z18.b, z29.b\n"
+    "smax z22.b, p0/M, z22.b, z28.b\n"
+    "smax z17.b, p0/M, z17.b, z27.b\n"
+    "smax z21.b, p0/M, z21.b, z26.b\n"
+    "smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "smax z3.b, p0/M, z3.b, z18.b\n"
+    "smax z2.b, p0/M, z2.b, z17.b\n"
+    "smax z1.b, p0/M, z1.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "smax z4.b, p0/M, z4.b, z0.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "smax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "smax z2.b, p0/M, z2.b, z17.b\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "smax z1.b, p0/M, z1.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    ".inst 0x4508a097  // sshllb z23.h, z4.b, #0x0\n"
+    ".inst 0x4508a496  // sshllt z22.h, z4.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a075  // sshllb z21.h, z3.b, #0x0\n"
+    ".inst 0x4508a472  // sshllt z18.h, z3.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a054  // sshllb z20.h, z2.b, #0x0\n"
+    ".inst 0x4508a451  // sshllt z17.h, z2.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1rw { z2.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a033  // sshllb z19.h, z1.b, #0x0\n"
+    ".inst 0x4508a430  // sshllt z16.h, z1.b, #0x0\n"
+    ".inst 0x4510a2e1  // sshllb z1.s, z23.h, #0x0\n"
+    ".inst 0x4510a6f7  // sshllt z23.s, z23.h, #0x0\n"
+    ".inst 0x4510a2c0  // sshllb z0.s, z22.h, #0x0\n"
+    ".inst 0x4510a6df  // sshllt z31.s, z22.h, #0x0\n"
+    ".inst 0x4510a2be  // sshllb z30.s, z21.h, #0x0\n"
+    ".inst 0x4510a6b6  // sshllt z22.s, z21.h, #0x0\n"
+    ".inst 0x4510a25d  // sshllb z29.s, z18.h, #0x0\n"
+    ".inst 0x4510a652  // sshllt z18.s, z18.h, #0x0\n"
+    ".inst 0x4510a29c  // sshllb z28.s, z20.h, #0x0\n"
+    ".inst 0x4510a695  // sshllt z21.s, z20.h, #0x0\n"
+    ".inst 0x4510a23b  // sshllb z27.s, z17.h, #0x0\n"
+    ".inst 0x4510a631  // sshllt z17.s, z17.h, #0x0\n"
+    ".inst 0x4510a27a  // sshllb z26.s, z19.h, #0x0\n"
+    ".inst 0x4510a674  // sshllt z20.s, z19.h, #0x0\n"
+    ".inst 0x4510a219  // sshllb z25.s, z16.h, #0x0\n"
+    ".inst 0x4510a618  // sshllt z24.s, z16.h, #0x0\n"
+    ".inst 0x44828081  // srshl z1.s, p0/M, z1.s, z4.s\n"
+    ".inst 0x44828097  // srshl z23.s, p0/M, z23.s, z4.s\n"
+    ".inst 0x44828080  // srshl z0.s, p0/M, z0.s, z4.s\n"
+    ".inst 0x4482809f  // srshl z31.s, p0/M, z31.s, z4.s\n"
+    ".inst 0x4482809e  // srshl z30.s, p0/M, z30.s, z4.s\n"
+    ".inst 0x44828096  // srshl z22.s, p0/M, z22.s, z4.s\n"
+    ".inst 0x4482809d  // srshl z29.s, p0/M, z29.s, z4.s\n"
+    ".inst 0x44828092  // srshl z18.s, p0/M, z18.s, z4.s\n"
+    ".inst 0x4482809c  // srshl z28.s, p0/M, z28.s, z4.s\n"
+    ".inst 0x44828095  // srshl z21.s, p0/M, z21.s, z4.s\n"
+    ".inst 0x4482809b  // srshl z27.s, p0/M, z27.s, z4.s\n"
+    ".inst 0x44828091  // srshl z17.s, p0/M, z17.s, z4.s\n"
+    ".inst 0x4482809a  // srshl z26.s, p0/M, z26.s, z4.s\n"
+    ".inst 0x44828094  // srshl z20.s, p0/M, z20.s, z4.s\n"
+    ".inst 0x44828099  // srshl z25.s, p0/M, z25.s, z4.s\n"
+    ".inst 0x44828098  // srshl z24.s, p0/M, z24.s, z4.s\n"
+    ".inst 0x04a37421  // sqrdmulh z1.s, z1.s, z3.s\n"
+    ".inst 0x04a376f7  // sqrdmulh z23.s, z23.s, z3.s\n"
+    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
+    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
+    ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
+    ".inst 0x04a376d6  // sqrdmulh z22.s, z22.s, z3.s\n"
+    ".inst 0x04a377bd  // sqrdmulh z29.s, z29.s, z3.s\n"
+    ".inst 0x04a37652  // sqrdmulh z18.s, z18.s, z3.s\n"
+    ".inst 0x04a3779c  // sqrdmulh z28.s, z28.s, z3.s\n"
+    ".inst 0x04a376b5  // sqrdmulh z21.s, z21.s, z3.s\n"
+    ".inst 0x04a3777b  // sqrdmulh z27.s, z27.s, z3.s\n"
+    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    ".inst 0x04a3775a  // sqrdmulh z26.s, z26.s, z3.s\n"
+    ".inst 0x04a37694  // sqrdmulh z20.s, z20.s, z3.s\n"
+    ".inst 0x04a37739  // sqrdmulh z25.s, z25.s, z3.s\n"
+    ".inst 0x04a37718  // sqrdmulh z24.s, z24.s, z3.s\n"
+    "mov z19.s, #0x7f\n"
+    ".inst 0x44828041  // srshl z1.s, p0/M, z1.s, z2.s\n"
+    ".inst 0x44828057  // srshl z23.s, p0/M, z23.s, z2.s\n"
+    ".inst 0x44828040  // srshl z0.s, p0/M, z0.s, z2.s\n"
+    ".inst 0x4482805f  // srshl z31.s, p0/M, z31.s, z2.s\n"
+    ".inst 0x4482805e  // srshl z30.s, p0/M, z30.s, z2.s\n"
+    ".inst 0x44828056  // srshl z22.s, p0/M, z22.s, z2.s\n"
+    ".inst 0x4482805d  // srshl z29.s, p0/M, z29.s, z2.s\n"
+    ".inst 0x44828052  // srshl z18.s, p0/M, z18.s, z2.s\n"
+    ".inst 0x4482805c  // srshl z28.s, p0/M, z28.s, z2.s\n"
+    ".inst 0x44828055  // srshl z21.s, p0/M, z21.s, z2.s\n"
+    ".inst 0x4482805b  // srshl z27.s, p0/M, z27.s, z2.s\n"
+    ".inst 0x44828051  // srshl z17.s, p0/M, z17.s, z2.s\n"
+    ".inst 0x4482805a  // srshl z26.s, p0/M, z26.s, z2.s\n"
+    ".inst 0x44828054  // srshl z20.s, p0/M, z20.s, z2.s\n"
+    ".inst 0x44828059  // srshl z25.s, p0/M, z25.s, z2.s\n"
+    ".inst 0x44828058  // srshl z24.s, p0/M, z24.s, z2.s\n"
+    "not z16.s, p0/M, z19.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z23.s, p0/M, z23.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "smax z30.s, p0/M, z30.s, z16.s\n"
+    "smax z22.s, p0/M, z22.s, z16.s\n"
+    "smax z29.s, p0/M, z29.s, z16.s\n"
+    "smax z18.s, p0/M, z18.s, z16.s\n"
+    "smax z28.s, p0/M, z28.s, z16.s\n"
+    "smax z21.s, p0/M, z21.s, z16.s\n"
+    "smax z27.s, p0/M, z27.s, z16.s\n"
+    "smax z17.s, p0/M, z17.s, z16.s\n"
+    "smax z26.s, p0/M, z26.s, z16.s\n"
+    "smax z20.s, p0/M, z20.s, z16.s\n"
+    "smax z25.s, p0/M, z25.s, z16.s\n"
+    "smax z24.s, p0/M, z24.s, z16.s\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z23.s, p0/M, z23.s, z19.s\n"
+    "trn1 z23.h, z1.h, z23.h\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "smin z31.s, p0/M, z31.s, z19.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
+    "smin z30.s, p0/M, z30.s, z19.s\n"
+    "smin z22.s, p0/M, z22.s, z19.s\n"
+    "trn1 z22.h, z30.h, z22.h\n"
+    "smin z29.s, p0/M, z29.s, z19.s\n"
+    "smin z18.s, p0/M, z18.s, z19.s\n"
+    "trn1 z18.h, z29.h, z18.h\n"
+    "smin z28.s, p0/M, z28.s, z19.s\n"
+    "smin z21.s, p0/M, z21.s, z19.s\n"
+    "trn1 z21.h, z28.h, z21.h\n"
+    "smin z27.s, p0/M, z27.s, z19.s\n"
+    "smin z17.s, p0/M, z17.s, z19.s\n"
+    "trn1 z17.h, z27.h, z17.h\n"
+    "smin z26.s, p0/M, z26.s, z19.s\n"
+    "smin z20.s, p0/M, z20.s, z19.s\n"
+    "trn1 z20.h, z26.h, z20.h\n"
+    "smin z25.s, p0/M, z25.s, z19.s\n"
+    "smin z24.s, p0/M, z24.s, z19.s\n"
+    "trn1 z19.h, z25.h, z24.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "trn1 z18.b, z22.b, z18.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+    "incb x28, ALL, MUL #4\n"
+    "trn1 z17.b, z21.b, z17.b\n"
+    "trn1 z16.b, z20.b, z19.b\n"
+    "st1b { z18.b }, p3, [%x[outptr], x27]\n"
+    "incb x27, ALL, MUL #4\n"
+    "st1b { z17.b }, p2, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "st1b { z16.b }, p1, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
+    "smax z23.b, p0/M, z23.b, z30.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z4.b, p0/M, z4.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "smax z4.b, p0/M, z4.b, z0.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    ".inst 0x4508a097  // sshllb z23.h, z4.b, #0x0\n"
+    ".inst 0x4508a496  // sshllt z22.h, z4.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4510a2e1  // sshllb z1.s, z23.h, #0x0\n"
+    ".inst 0x4510a6f7  // sshllt z23.s, z23.h, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    ".inst 0x4510a2c0  // sshllb z0.s, z22.h, #0x0\n"
+    ".inst 0x4510a6df  // sshllt z31.s, z22.h, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1rw { z2.s }, p0/Z, [x19]\n"
+    ".inst 0x44828081  // srshl z1.s, p0/M, z1.s, z4.s\n"
+    ".inst 0x44828097  // srshl z23.s, p0/M, z23.s, z4.s\n"
+    ".inst 0x44828080  // srshl z0.s, p0/M, z0.s, z4.s\n"
+    ".inst 0x4482809f  // srshl z31.s, p0/M, z31.s, z4.s\n"
+    ".inst 0x04a37421  // sqrdmulh z1.s, z1.s, z3.s\n"
+    ".inst 0x04a376f7  // sqrdmulh z23.s, z23.s, z3.s\n"
+    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
+    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
+    "mov z19.s, #0x7f\n"
+    ".inst 0x44828041  // srshl z1.s, p0/M, z1.s, z2.s\n"
+    ".inst 0x44828057  // srshl z23.s, p0/M, z23.s, z2.s\n"
+    ".inst 0x44828040  // srshl z0.s, p0/M, z0.s, z2.s\n"
+    ".inst 0x4482805f  // srshl z31.s, p0/M, z31.s, z2.s\n"
+    "not z16.s, p0/M, z19.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z23.s, p0/M, z23.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z23.s, p0/M, z23.s, z19.s\n"
+    "trn1 z23.h, z1.h, z23.h\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "smin z31.s, p0/M, z31.s, z19.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..2cdb288
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
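+// Generic NHWC average pooling for uint8: the implementation accumulates the
+// n_valid_cells inputs pointed to by inptrs and rescales by roughly
+// 1/window_cells (see generic.cpp for how the fixed-point rescale value and
+// shift are derived).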
+void sme_u8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+struct sme_u8_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t>
+{
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t>;
+  sme_u8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_u8_nhwc_avg_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..e529e4c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555556, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555556, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sme_u8_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
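+  // The assembly below finishes the average per accumulator: a saturating
+  // doubling multiply-high by rescale_value (1/window_cells normalised into
+  // Q31), a rounding shift by shift_value to undo that normalisation, and a
+  // clamp to [0, 255] before the lanes are interleaved back to bytes and
+  // stored; roughly out = clamp((sum * rescale_value) >> (31 - shift_value), 0, 255).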
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "mov z11.s, #0x0\n"
+    "mov z10.s, #0x0\n"
+    "mov z9.s, #0x0\n"
+    "mov z8.s, #0x0\n"
+    "mov z7.s, #0x0\n"
+    "mov z6.s, #0x0\n"
+    "mov z5.s, #0x0\n"
+    "mov z4.s, #0x0\n"
+    "mov z3.s, #0x0\n"
+    "mov z2.s, #0x0\n"
+    "mov z1.s, #0x0\n"
+    "mov z0.s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x4508abb5  // ushllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508afb4  // ushllt z20.h, z29.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508ab73  // ushllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508af72  // ushllt z18.h, z27.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508ab31  // ushllb z17.h, z25.b, #0x0\n"
+    ".inst 0x4508af30  // ushllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
+    ".inst 0x04b1756b  // sqdmulh z11.s, z11.s, z17.s\n"
+    ".inst 0x04b1754a  // sqdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x04b17529  // sqdmulh z9.s, z9.s, z17.s\n"
+    ".inst 0x04b17508  // sqdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x04b174e7  // sqdmulh z7.s, z7.s, z17.s\n"
+    ".inst 0x04b174c6  // sqdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x04b174a5  // sqdmulh z5.s, z5.s, z17.s\n"
+    ".inst 0x04b17484  // sqdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x04b17463  // sqdmulh z3.s, z3.s, z17.s\n"
+    ".inst 0x04b17442  // sqdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x04b17421  // sqdmulh z1.s, z1.s, z17.s\n"
+    ".inst 0x04b17400  // sqdmulh z0.s, z0.s, z17.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    ".inst 0x4482820b  // srshl z11.s, p0/M, z11.s, z16.s\n"
+    ".inst 0x4482820a  // srshl z10.s, p0/M, z10.s, z16.s\n"
+    ".inst 0x44828209  // srshl z9.s, p0/M, z9.s, z16.s\n"
+    ".inst 0x44828208  // srshl z8.s, p0/M, z8.s, z16.s\n"
+    ".inst 0x44828207  // srshl z7.s, p0/M, z7.s, z16.s\n"
+    ".inst 0x44828206  // srshl z6.s, p0/M, z6.s, z16.s\n"
+    ".inst 0x44828205  // srshl z5.s, p0/M, z5.s, z16.s\n"
+    ".inst 0x44828204  // srshl z4.s, p0/M, z4.s, z16.s\n"
+    ".inst 0x44828203  // srshl z3.s, p0/M, z3.s, z16.s\n"
+    ".inst 0x44828202  // srshl z2.s, p0/M, z2.s, z16.s\n"
+    ".inst 0x44828201  // srshl z1.s, p0/M, z1.s, z16.s\n"
+    ".inst 0x44828200  // srshl z0.s, p0/M, z0.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z19.s, #0xff\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smin z11.s, p0/M, z11.s, z19.s\n"
+    "smin z10.s, p0/M, z10.s, z19.s\n"
+    "trn1 z22.h, z11.h, z10.h\n"
+    "smin z9.s, p0/M, z9.s, z19.s\n"
+    "smin z8.s, p0/M, z8.s, z19.s\n"
+    "trn1 z18.h, z9.h, z8.h\n"
+    "smin z7.s, p0/M, z7.s, z19.s\n"
+    "smin z6.s, p0/M, z6.s, z19.s\n"
+    "trn1 z21.h, z7.h, z6.h\n"
+    "smin z5.s, p0/M, z5.s, z19.s\n"
+    "smin z4.s, p0/M, z4.s, z19.s\n"
+    "trn1 z17.h, z5.h, z4.h\n"
+    "smin z3.s, p0/M, z3.s, z19.s\n"
+    "smin z2.s, p0/M, z2.s, z19.s\n"
+    "trn1 z20.h, z3.h, z2.h\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "trn1 z19.h, z1.h, z0.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "trn1 z18.b, z22.b, z18.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "trn1 z17.b, z21.b, z17.b\n"
+    "trn1 z16.b, z20.b, z19.b\n"
+    "st1b { z18.b }, p3, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "st1b { z17.b }, p2, [%x[outptr], x24]\n"
+    "incb x24, ALL, MUL #4\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z19.s, #0xff\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..6d5f53d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<uint8_t, uint8_t>
+{
+  using Parent = DepthfirstStrategy<uint8_t, uint8_t>;
+
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
+
+  sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
+
+  Parent::KernelType get_kernel(void) const { return sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..d76755a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const uint8_t *const *const inptrs;
+    uint8_t *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const uint8_t *const *input_ptrs,
+      uint8_t *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
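+  // The padding/exclusion parameters are accepted for interface
+  // compatibility but are not used by this kernel, so KernelArgs discards
+  // them (note the unnamed constructor parameters above).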
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x14, #0x0\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+    "whilelt p1.b, x14, x12\n"
+    "ldp x11, x10, [x20, #0x0]\n"
+    "ldp x9, x28, [x20, #0x10]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1b { z20.b }, p1/Z, [x27, x14]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+    "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+    "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+    "ld1b { z19.b }, p1/Z, [x19, x14]\n"
+    "incw x14\n"
+    "whilelt p1.b, x14, x12\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z29\n umax z22.b, p2/M, z22.b, z27.b\n"
+    "movprfx z21, z27\n umax z21.b, p2/M, z21.b, z26.b\n"
+    "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+    "whilelt p0.b, x13, x12\n"
+    "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z20.b\n"
+    "movprfx z20, z25\n umax z20.b, p2/M, z20.b, z24.b\n"
+    "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+    "movprfx z17, z23\n umax z17.b, p2/M, z17.b, z28.b\n"
+    "movprfx z16, z25\n umax z16.b, p2/M, z16.b, z19.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+    "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+    "movprfx z19, z18\n umax z19.b, p2/M, z19.b, z22.b\n"
+    "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z20.b\n"
+    "ld1b { z20.b }, p1/Z, [x27, x14]\n"
+    "umax z17.b, p2/M, z17.b, z21.b\n"
+    "umax z16.b, p2/M, z16.b, z21.b\n"
+    "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+    "st1b { z19.b }, p0, [x11, x13]\n"
+    "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+    "st1b { z18.b }, p0, [x10, x13]\n"
+    "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+    "st1b { z17.b }, p0, [x9, x13]\n"
+    "ld1b { z19.b }, p1/Z, [x19, x14]\n"
+    "incw x14\n"
+    "whilelt p1.b, x14, x12\n"
+    "st1b { z16.b }, p0, [x28, x13]\n"
+    "incw x13\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z29\n umax z22.b, p2/M, z22.b, z27.b\n"
+    "movprfx z21, z27\n umax z21.b, p2/M, z21.b, z26.b\n"
+    "whilelt p0.b, x13, x12\n"
+    "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z20.b\n"
+    "movprfx z20, z25\n umax z20.b, p2/M, z20.b, z24.b\n"
+    "movprfx z17, z23\n umax z17.b, p2/M, z17.b, z28.b\n"
+    "movprfx z16, z25\n umax z16.b, p2/M, z16.b, z19.b\n"
+    "movprfx z19, z18\n umax z19.b, p2/M, z19.b, z22.b\n"
+    "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z20.b\n"
+    "st1b { z19.b }, p0, [x11, x13]\n"
+    "umax z17.b, p2/M, z17.b, z21.b\n"
+    "umax z16.b, p2/M, z16.b, z21.b\n"
+    "st1b { z18.b }, p0, [x10, x13]\n"
+    "st1b { z17.b }, p0, [x9, x13]\n"
+    "st1b { z16.b }, p0, [x28, x13]\n"
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..5c637ec
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_u8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+struct sme_u8_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t>
+{
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t>;
+  sme_u8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_u8_nhwc_max_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..21af2eb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_u8_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr
+)
+{
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x0\n"
+    "mov z3.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z2.b, #0x0\n"
+    "mov z1.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax z18.b, p0/M, z18.b, z29.b\n"
+    "umax z22.b, p0/M, z22.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "umax z17.b, p0/M, z17.b, z27.b\n"
+    "umax z21.b, p0/M, z21.b, z26.b\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "umax z4.b, p0/M, z4.b, z19.b\n"
+    "umax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "umax z2.b, p0/M, z2.b, z17.b\n"
+    "umax z1.b, p0/M, z1.b, z16.b\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "umax z18.b, p0/M, z18.b, z29.b\n"
+    "umax z22.b, p0/M, z22.b, z28.b\n"
+    "umax z17.b, p0/M, z17.b, z27.b\n"
+    "umax z21.b, p0/M, z21.b, z26.b\n"
+    "umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "umax z4.b, p0/M, z4.b, z19.b\n"
+    "umax z3.b, p0/M, z3.b, z18.b\n"
+    "umax z2.b, p0/M, z2.b, z17.b\n"
+    "umax z1.b, p0/M, z1.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "umax z4.b, p0/M, z4.b, z0.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "umax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "umax z2.b, p0/M, z2.b, z17.b\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "umax z1.b, p0/M, z1.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1b { z4.b }, p4, [%x[outptr], x28]\n"
+    "incb x28, ALL, MUL #4\n"
+    "st1b { z3.b }, p3, [%x[outptr], x27]\n"
+    "incb x27, ALL, MUL #4\n"
+    "st1b { z2.b }, p2, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "st1b { z1.b }, p1, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax z4.b, p0/M, z4.b, z19.b\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z4.b, p0/M, z4.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "umax z4.b, p0/M, z4.b, z0.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1b { z4.b }, p4, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..2930993
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_u8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+struct sme_u8q_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>
+{
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>;
+  sme_u8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_u8q_nhwc_avg_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..8a3cafa
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include "src/core/NEON/kernels/assembly/pooling.hpp"
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
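+  // Each entry encodes 1/n (for window sizes n = 2..9) as a Q31 fixed-point
+  // multiplier plus a right shift: 1/n ~= (multiplier / 2^31) * 2^shift.
+  // For example, 1/3 ~= (0x55555556 / 2^31) * 2^-1.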
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555556, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555556, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sme_u8q_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
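+  // A 1x1 window with a single valid cell makes the average pool an identity,
+  // so the bytes are copied straight through and no requantization is applied
+  // on this path.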
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
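+  // For window_cells >= 2, rescale_value now lies in [2^30, 2^31) and
+  // (rescale_value / 2^31) * 2^shift_value ~= 1 / window_cells.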
+
+
+  // Initialise the accumulators such that the offsets are subtracted for all
+  // valid inputs.
+  const int32_t accumulator_init = -qp.input_offset * n_valid_cells;
+
+  // Combine the rescale value for the requantization with the scaling factor
+  // for the average pool.
+  const int32_t shift = qp.per_layer_left_shift - qp.per_layer_right_shift + shift_value;
+  const int32_t left_shift = shift > 0 ? shift : 0;
+  const int32_t right_shift = shift <= 0 ? shift : 0;
+
+  int32_t combined_rescale_value = 0;
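+  // SQRDMULH is a saturating rounding doubling high multiply, so the value
+  // computed below approximates (per_layer_mul * rescale_value) >> 31: the two
+  // Q31 multipliers are folded into a single Q31 multiplier for the main loop.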
+  __asm__ __volatile__ (
+      "mov v16.s[0], %w[per_layer_mul]\n"
+      "mov v17.s[0], %w[rescale_value]\n"
+      "sqrdmulh s18, s16, s17\n"
+      "mov %w[combined_rescale_value], v18.s[0]\n"
+    : [combined_rescale_value] "=r" (combined_rescale_value)
+    : [per_layer_mul] "r" (qp.per_layer_mul), [rescale_value] "r" (rescale_value)
+    : "v16", "v17", "v18"
+  );
+
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z14.d, z15.d\n"
+    "mov z13.d, z15.d\n"
+    "mov z12.d, z15.d\n"
+    "mov z11.d, z15.d\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z10.d, z15.d\n"
+    "mov z9.d, z15.d\n"
+    "mov z8.d, z15.d\n"
+    "mov z7.d, z15.d\n"
+    "mov z6.d, z15.d\n"
+    "mov z5.d, z15.d\n"
+    "mov z4.d, z15.d\n"
+    "mov z3.d, z15.d\n"
+    "mov z2.d, z15.d\n"
+    "mov z1.d, z15.d\n"
+    "mov z0.d, z15.d\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    ".inst 0x4508abb5  // ushllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508afb4  // ushllt z20.h, z29.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508ab73  // ushllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508af72  // ushllt z18.h, z27.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508ab31  // ushllb z17.h, z25.b, #0x0\n"
+    ".inst 0x4508af30  // ushllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "ld1rw { z19.s }, p0/Z, [%x[left_shift]]\n"
+    ".inst 0x4482826f  // srshl z15.s, p0/M, z15.s, z19.s\n"
+    ".inst 0x4482826e  // srshl z14.s, p0/M, z14.s, z19.s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    ".inst 0x4482826d  // srshl z13.s, p0/M, z13.s, z19.s\n"
+    ".inst 0x4482826c  // srshl z12.s, p0/M, z12.s, z19.s\n"
+    "ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x4482826b  // srshl z11.s, p0/M, z11.s, z19.s\n"
+    ".inst 0x4482826a  // srshl z10.s, p0/M, z10.s, z19.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x44828269  // srshl z9.s, p0/M, z9.s, z19.s\n"
+    ".inst 0x44828268  // srshl z8.s, p0/M, z8.s, z19.s\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x44828267  // srshl z7.s, p0/M, z7.s, z19.s\n"
+    ".inst 0x44828266  // srshl z6.s, p0/M, z6.s, z19.s\n"
+    ".inst 0x44828265  // srshl z5.s, p0/M, z5.s, z19.s\n"
+    ".inst 0x44828264  // srshl z4.s, p0/M, z4.s, z19.s\n"
+    ".inst 0x44828263  // srshl z3.s, p0/M, z3.s, z19.s\n"
+    ".inst 0x44828262  // srshl z2.s, p0/M, z2.s, z19.s\n"
+    ".inst 0x44828261  // srshl z1.s, p0/M, z1.s, z19.s\n"
+    ".inst 0x44828260  // srshl z0.s, p0/M, z0.s, z19.s\n"
+    ".inst 0x04b275ef  // sqrdmulh z15.s, z15.s, z18.s\n"
+    ".inst 0x04b275ce  // sqrdmulh z14.s, z14.s, z18.s\n"
+    ".inst 0x04b275ad  // sqrdmulh z13.s, z13.s, z18.s\n"
+    ".inst 0x04b2758c  // sqrdmulh z12.s, z12.s, z18.s\n"
+    ".inst 0x04b2756b  // sqrdmulh z11.s, z11.s, z18.s\n"
+    ".inst 0x04b2754a  // sqrdmulh z10.s, z10.s, z18.s\n"
+    ".inst 0x04b27529  // sqrdmulh z9.s, z9.s, z18.s\n"
+    ".inst 0x04b27508  // sqrdmulh z8.s, z8.s, z18.s\n"
+    ".inst 0x04b274e7  // sqrdmulh z7.s, z7.s, z18.s\n"
+    ".inst 0x04b274c6  // sqrdmulh z6.s, z6.s, z18.s\n"
+    ".inst 0x04b274a5  // sqrdmulh z5.s, z5.s, z18.s\n"
+    ".inst 0x04b27484  // sqrdmulh z4.s, z4.s, z18.s\n"
+    ".inst 0x04b27463  // sqrdmulh z3.s, z3.s, z18.s\n"
+    ".inst 0x04b27442  // sqrdmulh z2.s, z2.s, z18.s\n"
+    ".inst 0x04b27421  // sqrdmulh z1.s, z1.s, z18.s\n"
+    ".inst 0x04b27400  // sqrdmulh z0.s, z0.s, z18.s\n"
+    ".inst 0x4482822f  // srshl z15.s, p0/M, z15.s, z17.s\n"
+    ".inst 0x4482822e  // srshl z14.s, p0/M, z14.s, z17.s\n"
+    ".inst 0x4482822d  // srshl z13.s, p0/M, z13.s, z17.s\n"
+    ".inst 0x4482822c  // srshl z12.s, p0/M, z12.s, z17.s\n"
+    ".inst 0x4482822b  // srshl z11.s, p0/M, z11.s, z17.s\n"
+    ".inst 0x4482822a  // srshl z10.s, p0/M, z10.s, z17.s\n"
+    ".inst 0x44828229  // srshl z9.s, p0/M, z9.s, z17.s\n"
+    ".inst 0x44828228  // srshl z8.s, p0/M, z8.s, z17.s\n"
+    ".inst 0x44828227  // srshl z7.s, p0/M, z7.s, z17.s\n"
+    ".inst 0x44828226  // srshl z6.s, p0/M, z6.s, z17.s\n"
+    ".inst 0x44828225  // srshl z5.s, p0/M, z5.s, z17.s\n"
+    ".inst 0x44828224  // srshl z4.s, p0/M, z4.s, z17.s\n"
+    ".inst 0x44828223  // srshl z3.s, p0/M, z3.s, z17.s\n"
+    ".inst 0x44828222  // srshl z2.s, p0/M, z2.s, z17.s\n"
+    ".inst 0x44828221  // srshl z1.s, p0/M, z1.s, z17.s\n"
+    ".inst 0x44828220  // srshl z0.s, p0/M, z0.s, z17.s\n"
+    "add z15.s, z15.s, z16.s\n"
+    "add z14.s, z14.s, z16.s\n"
+    "add z13.s, z13.s, z16.s\n"
+    "add z12.s, z12.s, z16.s\n"
+    "add z11.s, z11.s, z16.s\n"
+    "add z10.s, z10.s, z16.s\n"
+    "add z9.s, z9.s, z16.s\n"
+    "add z8.s, z8.s, z16.s\n"
+    "add z7.s, z7.s, z16.s\n"
+    "add z6.s, z6.s, z16.s\n"
+    "add z5.s, z5.s, z16.s\n"
+    "add z4.s, z4.s, z16.s\n"
+    "add z3.s, z3.s, z16.s\n"
+    "add z2.s, z2.s, z16.s\n"
+    "add z1.s, z1.s, z16.s\n"
+    "add z0.s, z0.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z19.s, #0xff\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smin z11.s, p0/M, z11.s, z19.s\n"
+    "smin z10.s, p0/M, z10.s, z19.s\n"
+    "trn1 z22.h, z11.h, z10.h\n"
+    "smin z9.s, p0/M, z9.s, z19.s\n"
+    "smin z8.s, p0/M, z8.s, z19.s\n"
+    "trn1 z18.h, z9.h, z8.h\n"
+    "smin z7.s, p0/M, z7.s, z19.s\n"
+    "smin z6.s, p0/M, z6.s, z19.s\n"
+    "trn1 z21.h, z7.h, z6.h\n"
+    "smin z5.s, p0/M, z5.s, z19.s\n"
+    "smin z4.s, p0/M, z4.s, z19.s\n"
+    "trn1 z17.h, z5.h, z4.h\n"
+    "smin z3.s, p0/M, z3.s, z19.s\n"
+    "smin z2.s, p0/M, z2.s, z19.s\n"
+    "trn1 z20.h, z3.h, z2.h\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "trn1 z19.h, z1.h, z0.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "trn1 z18.b, z22.b, z18.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "trn1 z17.b, z21.b, z17.b\n"
+    "trn1 z16.b, z20.b, z19.b\n"
+    "st1b { z18.b }, p3, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "st1b { z17.b }, p2, [%x[outptr], x24]\n"
+    "incb x24, ALL, MUL #4\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z14.d, z15.d\n"
+    "mov z13.d, z15.d\n"
+    "mov z12.d, z15.d\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
+    "subs x20, x20, #0x1\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "ld1rw { z19.s }, p0/Z, [%x[left_shift]]\n"
+    ".inst 0x4482826f  // srshl z15.s, p0/M, z15.s, z19.s\n"
+    ".inst 0x4482826e  // srshl z14.s, p0/M, z14.s, z19.s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    ".inst 0x4482826d  // srshl z13.s, p0/M, z13.s, z19.s\n"
+    ".inst 0x4482826c  // srshl z12.s, p0/M, z12.s, z19.s\n"
+    "ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x04b275ef  // sqrdmulh z15.s, z15.s, z18.s\n"
+    ".inst 0x04b275ce  // sqrdmulh z14.s, z14.s, z18.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x04b275ad  // sqrdmulh z13.s, z13.s, z18.s\n"
+    ".inst 0x04b2758c  // sqrdmulh z12.s, z12.s, z18.s\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x4482822f  // srshl z15.s, p0/M, z15.s, z17.s\n"
+    ".inst 0x4482822e  // srshl z14.s, p0/M, z14.s, z17.s\n"
+    ".inst 0x4482822d  // srshl z13.s, p0/M, z13.s, z17.s\n"
+    ".inst 0x4482822c  // srshl z12.s, p0/M, z12.s, z17.s\n"
+    "add z15.s, z15.s, z16.s\n"
+    "add z14.s, z14.s, z16.s\n"
+    "add z13.s, z13.s, z16.s\n"
+    "add z12.s, z12.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z19.s, #0xff\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z19.s\n"
+    "smin z14.s, p0/M, z14.s, z19.s\n"
+    "trn1 z23.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [outptr] "r" (outptr), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..d7bf6cb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void sme_u8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+struct sme_u8q_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>
+{
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>;
+  sme_u8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sme_u8q_nhwc_max_generic_depthfirst_impl; }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..7914e35
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include "src/core/NEON/kernels/assembly/pooling.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sme_u8q_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  __asm__ __volatile__(
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov z3.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z2.b, #0x0\n"
+    "mov z1.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax z18.b, p0/M, z18.b, z29.b\n"
+    "umax z22.b, p0/M, z22.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "umax z17.b, p0/M, z17.b, z27.b\n"
+    "umax z21.b, p0/M, z21.b, z26.b\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "umax z5.b, p0/M, z5.b, z19.b\n"
+    "umax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "umax z2.b, p0/M, z2.b, z17.b\n"
+    "umax z1.b, p0/M, z1.b, z16.b\n"
+    "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "umax z18.b, p0/M, z18.b, z29.b\n"
+    "umax z22.b, p0/M, z22.b, z28.b\n"
+    "umax z17.b, p0/M, z17.b, z27.b\n"
+    "umax z21.b, p0/M, z21.b, z26.b\n"
+    "umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "umax z5.b, p0/M, z5.b, z19.b\n"
+    "umax z3.b, p0/M, z3.b, z18.b\n"
+    "umax z2.b, p0/M, z2.b, z17.b\n"
+    "umax z1.b, p0/M, z1.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "umax z5.b, p0/M, z5.b, z0.b\n"
+    "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+    "umax z3.b, p0/M, z3.b, z18.b\n"
+    "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+    "umax z2.b, p0/M, z2.b, z17.b\n"
+    "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+    "umax z1.b, p0/M, z1.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a8b7  // ushllb z23.h, z5.b, #0x0\n"
+    ".inst 0x4508acb9  // ushllt z25.h, z5.b, #0x0\n"
+    ".inst 0x4508a876  // ushllb z22.h, z3.b, #0x0\n"
+    ".inst 0x4508ac72  // ushllt z18.h, z3.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a855  // ushllb z21.h, z2.b, #0x0\n"
+    ".inst 0x4508ac51  // ushllt z17.h, z2.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z2.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a834  // ushllb z20.h, z1.b, #0x0\n"
+    ".inst 0x4508ac38  // ushllt z24.h, z1.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1rw { z19.s }, p0/Z, [x19]\n"
+    "neg z4.s, p0/M, z4.s\n"
+    ".inst 0x45974081  // saddwb z1.s, z4.s, z23.h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x45974497  // saddwt z23.s, z4.s, z23.h\n"
+    ".inst 0x45994080  // saddwb z0.s, z4.s, z25.h\n"
+    ".inst 0x4599449f  // saddwt z31.s, z4.s, z25.h\n"
+    ".inst 0x4596409e  // saddwb z30.s, z4.s, z22.h\n"
+    ".inst 0x45964496  // saddwt z22.s, z4.s, z22.h\n"
+    ".inst 0x4592409d  // saddwb z29.s, z4.s, z18.h\n"
+    ".inst 0x45924492  // saddwt z18.s, z4.s, z18.h\n"
+    ".inst 0x4595409c  // saddwb z28.s, z4.s, z21.h\n"
+    ".inst 0x45954495  // saddwt z21.s, z4.s, z21.h\n"
+    ".inst 0x4591409b  // saddwb z27.s, z4.s, z17.h\n"
+    ".inst 0x45914491  // saddwt z17.s, z4.s, z17.h\n"
+    ".inst 0x4594409a  // saddwb z26.s, z4.s, z20.h\n"
+    ".inst 0x45944494  // saddwt z20.s, z4.s, z20.h\n"
+    ".inst 0x45984099  // saddwb z25.s, z4.s, z24.h\n"
+    ".inst 0x45984498  // saddwt z24.s, z4.s, z24.h\n"
+    ".inst 0x44828061  // srshl z1.s, p0/M, z1.s, z3.s\n"
+    ".inst 0x44828077  // srshl z23.s, p0/M, z23.s, z3.s\n"
+    ".inst 0x44828060  // srshl z0.s, p0/M, z0.s, z3.s\n"
+    ".inst 0x4482807f  // srshl z31.s, p0/M, z31.s, z3.s\n"
+    ".inst 0x4482807e  // srshl z30.s, p0/M, z30.s, z3.s\n"
+    ".inst 0x44828076  // srshl z22.s, p0/M, z22.s, z3.s\n"
+    ".inst 0x4482807d  // srshl z29.s, p0/M, z29.s, z3.s\n"
+    ".inst 0x44828072  // srshl z18.s, p0/M, z18.s, z3.s\n"
+    ".inst 0x4482807c  // srshl z28.s, p0/M, z28.s, z3.s\n"
+    ".inst 0x44828075  // srshl z21.s, p0/M, z21.s, z3.s\n"
+    ".inst 0x4482807b  // srshl z27.s, p0/M, z27.s, z3.s\n"
+    ".inst 0x44828071  // srshl z17.s, p0/M, z17.s, z3.s\n"
+    ".inst 0x4482807a  // srshl z26.s, p0/M, z26.s, z3.s\n"
+    ".inst 0x44828074  // srshl z20.s, p0/M, z20.s, z3.s\n"
+    ".inst 0x44828079  // srshl z25.s, p0/M, z25.s, z3.s\n"
+    ".inst 0x44828078  // srshl z24.s, p0/M, z24.s, z3.s\n"
+    ".inst 0x04a27421  // sqrdmulh z1.s, z1.s, z2.s\n"
+    ".inst 0x04a276f7  // sqrdmulh z23.s, z23.s, z2.s\n"
+    ".inst 0x04a27400  // sqrdmulh z0.s, z0.s, z2.s\n"
+    ".inst 0x04a277ff  // sqrdmulh z31.s, z31.s, z2.s\n"
+    ".inst 0x04a277de  // sqrdmulh z30.s, z30.s, z2.s\n"
+    ".inst 0x04a276d6  // sqrdmulh z22.s, z22.s, z2.s\n"
+    ".inst 0x04a277bd  // sqrdmulh z29.s, z29.s, z2.s\n"
+    ".inst 0x04a27652  // sqrdmulh z18.s, z18.s, z2.s\n"
+    ".inst 0x04a2779c  // sqrdmulh z28.s, z28.s, z2.s\n"
+    ".inst 0x04a276b5  // sqrdmulh z21.s, z21.s, z2.s\n"
+    ".inst 0x04a2777b  // sqrdmulh z27.s, z27.s, z2.s\n"
+    ".inst 0x04a27631  // sqrdmulh z17.s, z17.s, z2.s\n"
+    ".inst 0x04a2775a  // sqrdmulh z26.s, z26.s, z2.s\n"
+    ".inst 0x04a27694  // sqrdmulh z20.s, z20.s, z2.s\n"
+    ".inst 0x04a27739  // sqrdmulh z25.s, z25.s, z2.s\n"
+    ".inst 0x04a27718  // sqrdmulh z24.s, z24.s, z2.s\n"
+    ".inst 0x44828261  // srshl z1.s, p0/M, z1.s, z19.s\n"
+    ".inst 0x44828277  // srshl z23.s, p0/M, z23.s, z19.s\n"
+    ".inst 0x44828260  // srshl z0.s, p0/M, z0.s, z19.s\n"
+    ".inst 0x4482827f  // srshl z31.s, p0/M, z31.s, z19.s\n"
+    ".inst 0x4482827e  // srshl z30.s, p0/M, z30.s, z19.s\n"
+    ".inst 0x44828276  // srshl z22.s, p0/M, z22.s, z19.s\n"
+    ".inst 0x4482827d  // srshl z29.s, p0/M, z29.s, z19.s\n"
+    ".inst 0x44828272  // srshl z18.s, p0/M, z18.s, z19.s\n"
+    ".inst 0x4482827c  // srshl z28.s, p0/M, z28.s, z19.s\n"
+    ".inst 0x44828275  // srshl z21.s, p0/M, z21.s, z19.s\n"
+    ".inst 0x4482827b  // srshl z27.s, p0/M, z27.s, z19.s\n"
+    ".inst 0x44828271  // srshl z17.s, p0/M, z17.s, z19.s\n"
+    ".inst 0x4482827a  // srshl z26.s, p0/M, z26.s, z19.s\n"
+    ".inst 0x44828274  // srshl z20.s, p0/M, z20.s, z19.s\n"
+    ".inst 0x44828279  // srshl z25.s, p0/M, z25.s, z19.s\n"
+    ".inst 0x44828278  // srshl z24.s, p0/M, z24.s, z19.s\n"
+    "add z1.s, z1.s, z16.s\n"
+    "add z23.s, z23.s, z16.s\n"
+    "add z0.s, z0.s, z16.s\n"
+    "add z31.s, z31.s, z16.s\n"
+    "add z30.s, z30.s, z16.s\n"
+    "add z22.s, z22.s, z16.s\n"
+    "add z29.s, z29.s, z16.s\n"
+    "add z18.s, z18.s, z16.s\n"
+    "add z28.s, z28.s, z16.s\n"
+    "add z21.s, z21.s, z16.s\n"
+    "add z27.s, z27.s, z16.s\n"
+    "add z17.s, z17.s, z16.s\n"
+    "add z26.s, z26.s, z16.s\n"
+    "add z20.s, z20.s, z16.s\n"
+    "add z25.s, z25.s, z16.s\n"
+    "add z24.s, z24.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z19.s, #0xff\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z23.s, p0/M, z23.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "smax z30.s, p0/M, z30.s, z16.s\n"
+    "smax z22.s, p0/M, z22.s, z16.s\n"
+    "smax z29.s, p0/M, z29.s, z16.s\n"
+    "smax z18.s, p0/M, z18.s, z16.s\n"
+    "smax z28.s, p0/M, z28.s, z16.s\n"
+    "smax z21.s, p0/M, z21.s, z16.s\n"
+    "smax z27.s, p0/M, z27.s, z16.s\n"
+    "smax z17.s, p0/M, z17.s, z16.s\n"
+    "smax z26.s, p0/M, z26.s, z16.s\n"
+    "smax z20.s, p0/M, z20.s, z16.s\n"
+    "smax z25.s, p0/M, z25.s, z16.s\n"
+    "smax z24.s, p0/M, z24.s, z16.s\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z23.s, p0/M, z23.s, z19.s\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "trn1 z23.h, z1.h, z23.h\n"
+    "smin z31.s, p0/M, z31.s, z19.s\n"
+    "smin z30.s, p0/M, z30.s, z19.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
+    "smin z22.s, p0/M, z22.s, z19.s\n"
+    "smin z29.s, p0/M, z29.s, z19.s\n"
+    "trn1 z22.h, z30.h, z22.h\n"
+    "smin z18.s, p0/M, z18.s, z19.s\n"
+    "smin z28.s, p0/M, z28.s, z19.s\n"
+    "trn1 z18.h, z29.h, z18.h\n"
+    "smin z21.s, p0/M, z21.s, z19.s\n"
+    "smin z27.s, p0/M, z27.s, z19.s\n"
+    "trn1 z21.h, z28.h, z21.h\n"
+    "smin z17.s, p0/M, z17.s, z19.s\n"
+    "smin z26.s, p0/M, z26.s, z19.s\n"
+    "trn1 z17.h, z27.h, z17.h\n"
+    "smin z20.s, p0/M, z20.s, z19.s\n"
+    "smin z25.s, p0/M, z25.s, z19.s\n"
+    "trn1 z20.h, z26.h, z20.h\n"
+    "smin z24.s, p0/M, z24.s, z19.s\n"
+    "trn1 z19.h, z25.h, z24.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "trn1 z18.b, z22.b, z18.b\n"
+    "trn1 z17.b, z21.b, z17.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+    "incb x28, ALL, MUL #4\n"
+    "trn1 z16.b, z20.b, z19.b\n"
+    "st1b { z18.b }, p3, [%x[outptr], x27]\n"
+    "incb x27, ALL, MUL #4\n"
+    "st1b { z17.b }, p2, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "st1b { z16.b }, p1, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax z5.b, p0/M, z5.b, z19.b\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
+    "umax z23.b, p0/M, z23.b, z30.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z5.b, p0/M, z5.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+    "subs x20, x20, #0x1\n"
+    "umax z5.b, p0/M, z5.b, z0.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a8b7  // ushllb z23.h, z5.b, #0x0\n"
+    ".inst 0x4508acb9  // ushllt z25.h, z5.b, #0x0\n"
+    "neg z4.s, p0/M, z4.s\n"
+    ".inst 0x45974081  // saddwb z1.s, z4.s, z23.h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    ".inst 0x45974497  // saddwt z23.s, z4.s, z23.h\n"
+    ".inst 0x45994080  // saddwb z0.s, z4.s, z25.h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z2.s }, p0/Z, [x19]\n"
+    ".inst 0x4599449f  // saddwt z31.s, z4.s, z25.h\n"
+    ".inst 0x44828061  // srshl z1.s, p0/M, z1.s, z3.s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1rw { z19.s }, p0/Z, [x19]\n"
+    ".inst 0x44828077  // srshl z23.s, p0/M, z23.s, z3.s\n"
+    ".inst 0x44828060  // srshl z0.s, p0/M, z0.s, z3.s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x4482807f  // srshl z31.s, p0/M, z31.s, z3.s\n"
+    ".inst 0x04a27421  // sqrdmulh z1.s, z1.s, z2.s\n"
+    ".inst 0x04a276f7  // sqrdmulh z23.s, z23.s, z2.s\n"
+    ".inst 0x04a27400  // sqrdmulh z0.s, z0.s, z2.s\n"
+    ".inst 0x04a277ff  // sqrdmulh z31.s, z31.s, z2.s\n"
+    ".inst 0x44828261  // srshl z1.s, p0/M, z1.s, z19.s\n"
+    ".inst 0x44828277  // srshl z23.s, p0/M, z23.s, z19.s\n"
+    ".inst 0x44828260  // srshl z0.s, p0/M, z0.s, z19.s\n"
+    ".inst 0x4482827f  // srshl z31.s, p0/M, z31.s, z19.s\n"
+    "add z1.s, z1.s, z16.s\n"
+    "add z23.s, z23.s, z16.s\n"
+    "add z0.s, z0.s, z16.s\n"
+    "add z31.s, z31.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z19.s, #0xff\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z23.s, p0/M, z23.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "smin z1.s, p0/M, z1.s, z19.s\n"
+    "smin z23.s, p0/M, z23.s, z19.s\n"
+    "smin z0.s, p0/M, z0.s, z19.s\n"
+    "trn1 z23.h, z1.h, z23.h\n"
+    "smin z31.s, p0/M, z31.s, z19.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
+    "trn1 z16.b, z23.b, z16.b\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
index e3ce652..a7f3dd3a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
@@ -33,6 +33,12 @@
 
 #include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+#include "kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
+#include "kernels/sme_fp16_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sme_fp16_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
 #include "kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
@@ -62,6 +68,58 @@
     },
   },
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
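+  // Each SME entry below gates kernel selection on CpuInfo::has_sme() and, for
+  // the generic kernels, on the requested pooling type.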
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sme() &&
+             is_supported<sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      auto strat = new sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<__fp16>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sme() &&
+             is_supported<sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      auto strat = new sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<__fp16>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp16_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme() && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      auto strat = new sme_fp16_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp16_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme() && args.pool_type == PoolingType::MAX;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      auto strat = new sme_fp16_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
index 5ee0884..99d1065 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
@@ -30,6 +30,12 @@
 
 #include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+#include "kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
+#include "kernels/sme_fp32_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sme_fp32_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
 #include "kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
@@ -59,6 +65,58 @@
     },
   },
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sme() &&
+             is_supported<sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      auto strat = new sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<float>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sme() &&
+             is_supported<sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      auto strat = new sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<float>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp32_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme() && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      auto strat = new sme_fp32_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_fp32_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme() && args.pool_type == PoolingType::MAX;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      auto strat = new sme_fp32_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float>(strat, args);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
index 0867abc..8d08ddc 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
@@ -30,6 +30,11 @@
 
 #include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+#include "kernels/sme_s8_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sme_s8_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
@@ -59,6 +64,45 @@
     },
   },
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sme() &&
+             is_supported<sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<int8_t>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_s8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme2() && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sme_s8_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_s8_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme() && args.pool_type == PoolingType::MAX;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sme_s8_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
index 6209f7c..dcb3c8f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
@@ -28,6 +28,10 @@
 #include "pooling_depthfirst_generic.hpp"
 
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+#include "kernels/sme_s8q_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sme_s8q_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp"
@@ -43,6 +47,32 @@
 
 static const PoolingImplementation<int8_t, int8_t, Requantize32> pooling_s8q_methods[] = {
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_s8q_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.cpu_info->has_sme2() && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sme_s8q_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t, int8_t, Requantize32>(strat, args, rq);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_s8q_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.cpu_info->has_sme2() && args.pool_type == PoolingType::MAX;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sme_s8q_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t, int8_t, Requantize32>(strat, args, rq);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
index b0c908a..ee5a79b 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
@@ -30,6 +30,11 @@
 
 #include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+#include "kernels/sme_u8_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sme_u8_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
@@ -59,6 +64,52 @@
     },
   },
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sme() &&
+             is_supported<sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<uint8_t>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_u8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      // This kernel can only be used when there is either no padding, or we don't care
+      // about the value of the padding. Otherwise, we would need to pass in the zero-point
+      // for the quantization regime.
+      return (args.exclude_padding ||
+              (args.padding.top == 0 && args.padding.bottom == 0 &&
+               args.padding.left == 0 && args.padding.right == 0)
+              ) && args.pool_type == PoolingType::AVERAGE &&
+             args.cpu_info->has_sme2();
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sme_u8_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_u8_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sme() && args.pool_type == PoolingType::MAX;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sme_u8_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
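
The predicate on sme_u8_nhwc_avg_generic_depthfirst spells out why padding is restricted: this variant has no Requantize32 output stage, so it never receives the quantization zero-point, and padded positions that are included in the average would have to be filled with that zero-point to be meaningful. A small numeric sketch of the difference (all values are illustrative):

    #include <cstdint>
    #include <iostream>

    // 1-D average over a 3-wide window that hangs one position over the edge.
    int main()
    {
        const uint8_t in_bounds[2] = {100, 120};
        const int     padded_count = 1;
        const int     zero_point   = 128; // hypothetical quantization zero-point

        const int excl_padding = (in_bounds[0] + in_bounds[1]) / 2;                             // 110: divisor shrinks
        const int incl_as_zero = (in_bounds[0] + in_bounds[1] + 0) / 3;                         // 73: wrong without the zero-point
        const int incl_correct = (in_bounds[0] + in_bounds[1] + padded_count * zero_point) / 3; // 116

        std::cout << excl_padding << ' ' << incl_as_zero << ' ' << incl_correct << '\n';
    }
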
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
index de0420a..cd1b028 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
@@ -28,6 +28,10 @@
 #include "pooling_depthfirst_generic.hpp"
 
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+#include "kernels/sme_u8q_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sme_u8q_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp"
@@ -43,6 +47,32 @@
 
 static const PoolingImplementation<uint8_t, uint8_t, Requantize32> pooling_u8q_methods[] = {
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_u8q_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.cpu_info->has_sme2() && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sme_u8q_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t, uint8_t, Requantize32>(strat, args, rq);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sme_u8q_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.cpu_info->has_sme2() && args.pool_type == PoolingType::MAX;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sme_u8q_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t, uint8_t, Requantize32>(strat, args, rq);
+    },
+  },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
index 58e4861..515d55c 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
@@ -48,10 +48,20 @@
 #include "kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp"
 #include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
 #endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_gemv_bf16fp32_dot_16VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+
 #include "kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp"
 #include "kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp"
 #include "kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp"
 #include "kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
 
 namespace arm_gemm {
 
@@ -60,6 +70,39 @@
 #ifdef __aarch64__
 #ifdef ARM_COMPUTE_ENABLE_BF16
 #ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+// SME2 kernels
+{
+    GemmMethod::GEMM_HYBRID,
+    "sme2_gemv_bf16fp32_dot_16VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+    nullptr,
+    [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_bf16fp32_dot_16VL, bfloat16, float>(args); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL, bfloat16, float>(args); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL, bfloat16, float>(args); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    nullptr,
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL, bfloat16, float>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
 // gemm_bf16_interleaved
 GemmImplementation<bfloat16, float>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
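
The estimator lambdas on the MOPA entries above choose the tile shape from the problem size: 1VLx4VL is favoured when M spans one or three vector-length tiles (M <= VL or 2*VL < M <= 3*VL), 4VLx1VL applies the same test to N, and 2VLx2VL has no estimator and acts as the general case. A worked check of the M-side test, assuming a 512-bit streaming vector length so that sme::get_vector_length<float>() would return 16:

    #include <iostream>

    // Hedged sketch of the estimator's shape test, with VL fixed to 16
    // (16 fp32 lanes in a 512-bit streaming vector).
    static bool prefers_1VLx4VL(unsigned int M, unsigned int VL)
    {
        return M <= VL || (2 * VL < M && M <= 3 * VL);
    }

    int main()
    {
        const unsigned int VL = 16;                   // assumption; depends on the implemented SVL
        std::cout << prefers_1VLx4VL(12, VL) << '\n'; // 1: one tile of rows
        std::cout << prefers_1VLx4VL(40, VL) << '\n'; // 1: 32 < 40 <= 48, three tiles
        std::cout << prefers_1VLx4VL(20, VL) << '\n'; // 0: two tiles, other shapes fit better
    }
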
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 0fc9e8b..f9ffd18 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -48,12 +48,24 @@
 #include "kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp"
 
+#ifdef ARM_COMPUTE_ENABLE_SVE
 #ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
 #include "kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp"
 #include "kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp"
 #include "kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp"
 #include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
 #endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_gemv_fp32_mla_16VL.hpp"
+#include "kernels/sme2_gemv_fp32bf16fp32_dot_16VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+
 #include "kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp"
 #include "kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp"
 #include "kernels/sve_hybrid_fp32_mla_6x4VL.hpp"
@@ -62,6 +74,7 @@
 #include "kernels/sve_interleaved_fp32_mla_8x3VL.hpp"
 #include "kernels/sve_interleaved_fp32_mmla_8x3VL.hpp"
 #include "kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
 
 namespace arm_gemm {
 
@@ -102,6 +115,75 @@
 ),
 #endif // ARM_COMPUTE_ENABLE_BF16
 #ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+// SME2 kernels
+{
+    GemmMethod::GEMM_HYBRID,
+    "sme2_gemv_fp32bf16fp32_dot_16VL",
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+    nullptr,
+    [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32bf16fp32_dot_16VL, float, float>(args); }
+},
+{
+    GemmMethod::GEMM_HYBRID,
+    "sme2_gemv_fp32_mla_16VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+    nullptr,
+    [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32_mla_16VL, float, float>(args); }
+},
+#ifdef ARM_COMPUTE_ENABLE_BF16
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL",
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL, float, float>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_BF16
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_fp32_mopa_1VLx4VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL, float, float>(args); }
+},
+#ifdef ARM_COMPUTE_ENABLE_BF16
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL",
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL, float, float>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_BF16
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_fp32_mopa_4VLx1VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL, float, float>(args); }
+},
+#ifdef ARM_COMPUTE_ENABLE_BF16
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL",
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+    nullptr,
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL, float, float>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_BF16
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_fp32_mopa_2VLx2VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    nullptr,
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL, float, float>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
 #ifdef ARM_COMPUTE_ENABLE_BF16
 GemmImplementation<float, float>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
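
In the fp32 table the bf16fp32 MOPA entries are additionally gated on args._fast_mode: when fast mode is requested on SME2 hardware, fp32 operands are packed down to bf16 (the sme2_interleave*_block2_fp32_bf16 routines added later in this patch appear to serve that purpose) and accumulated back in fp32; otherwise the native fp32 MOPA kernels apply. A minimal sketch of that routing, with names illustrative only:

    // Illustrative helper, not library code.
    struct Fp32Selection
    {
        bool has_sme2;
        bool fast_mode;
    };

    inline const char *sme2_fp32_route(const Fp32Selection &s)
    {
        if (s.has_sme2 && s.fast_mode)
        {
            return "sme2_interleaved_nomerge_bf16fp32_mopa_*"; // fp32 -> bf16 on interleave, fp32 accumulate
        }
        if (s.has_sme2)
        {
            return "sme2_interleaved_nomerge_fp32_mopa_*";     // native fp32 outer-product kernels
        }
        return "later SVE/Neon entries in the table";
    }
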
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index 2450748..38a7c94 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -39,16 +39,50 @@
 #include "kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp"
 
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+
 #include "kernels/sve_hybrid_s8s32_dot_6x4VL.hpp"
 #include "kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp"
 #include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
 #include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
 #include "kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
 
 namespace arm_gemm {
 
 static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
 #ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+// SME2 kernels
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<int32_t>();
+                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL, int8_t, int32_t>(args); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<int32_t>();
+                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL, int8_t, int32_t>(args); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL",
+    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    nullptr,
+    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL, int8_t, int32_t>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
 GemmImplementation<int8_t, int32_t>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8s32_mmla_6x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
index 470cee1..e7346e8 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
@@ -30,6 +30,7 @@
 #include "bfloat.hpp"
 #include "convolver.hpp"
 #include "kernel_weight_format.hpp"
+#include "kernel_traits.hpp"
 #include "mergeresults.hpp"
 #include "performance_parameters.hpp"
 #include "quantized.hpp"
@@ -576,6 +577,26 @@
             return get_ktotal(args);
         }
 
+        // Special blocking for SME
+        if (is_sme<strategy>::value) {
+            // Don't bother to block below this size threshold (in elements of Toi); experimentally determined to be 320 for FP32
+            unsigned int scaling_threshold = 1280 / sizeof(Toi);
+
+            if (get_ktotal(args) <= scaling_threshold) {
+                return get_ktotal(args);
+            }
+
+            // Once we are blocking, this (lower) threshold determines when we should use more blocks
+            // NOTE: a factor-based choice of the number of blocks might work better here.
+            unsigned int max_block_size = 1024 / sizeof(Toi);
+
+            unsigned int num_k_blocks = iceildiv(get_ktotal(args), max_block_size);
+
+            unsigned int k_block = roundup(iceildiv(get_ktotal(args), num_k_blocks), strategy::k_unroll());
+
+            return k_block;
+        }
+
         const unsigned int L1_size = args._ci->get_L1_cache_size();
         unsigned int k_block;
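
For FP32 operands (sizeof(Toi) == 4) the SME branch above works out to scaling_threshold = 320 and max_block_size = 256 elements, so K is only split once it exceeds 320, and then into near-equal blocks rather than 256-element blocks plus a ragged tail. A worked sketch, assuming k_unroll() == 1 purely for illustration:

    #include <iostream>

    static unsigned int iceildiv(unsigned int a, unsigned int b) { return (a + b - 1) / b; }
    static unsigned int roundup(unsigned int a, unsigned int b)  { return iceildiv(a, b) * b; }

    int main()
    {
        const unsigned int ktotal            = 1000;                 // example K
        const unsigned int scaling_threshold = 1280 / sizeof(float); // 320
        const unsigned int max_block_size    = 1024 / sizeof(float); // 256
        const unsigned int k_unroll          = 1;                    // assumption for illustration

        unsigned int k_block = ktotal;
        if (ktotal > scaling_threshold)
        {
            const unsigned int num_k_blocks = iceildiv(ktotal, max_block_size); // 4
            k_block = roundup(iceildiv(ktotal, num_k_blocks), k_unroll);        // 250
        }
        std::cout << k_block << '\n'; // 250: four blocks of 250 instead of 3x256 + 232
    }
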
 
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index 1d7b9c5..ac49536 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -38,6 +38,14 @@
 #include "kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp"
 
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_gemv_s8qa_dot_16VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+
 #include "kernels/sve_hybrid_s8qa_dot_4x4VL.hpp"
 #include "kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp"
 #include "kernels/sve_hybrid_s8qs_dot_6x4VL.hpp"
@@ -47,11 +55,13 @@
 #include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
 #include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
 #include "kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
 
 #include "gemm_hybrid_indirect.hpp"
 #include "gemm_hybrid_quantized.hpp"
 #include "gemm_hybrid_quantized_inline.hpp"
 #include "gemm_interleaved.hpp"
+#include "gemv_pretransposed.hpp"
 #include "quantize_wrapper.hpp"
 #include "utils.hpp"
 
@@ -60,6 +70,38 @@
 static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods[] =
 {
 #ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+{
+    GemmMethod::GEMM_HYBRID,
+    "sme2_gemv_s8qa_dot_16VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && quant_hybrid_asymmetric(qp) && args._Msize == 1 && !args._indirect_input && args._nbatches == 1;  },
+    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemvPretransposed<cls_sme2_gemv_s8qa_dot_16VL, int8_t, int8_t, Requantize32>(args, qp); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_s8q_mopa_1VLx4VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && args._maxthreads == 1 && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
+    [](const GemmArgs &args, const Requantize32 &) { const auto VL = sme::get_vector_length<int32_t>();
+                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_s8q_mopa_1VLx4VL, int8_t, int8_t>(args, qp); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_s8q_mopa_4VLx1VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && args._maxthreads == 1 && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
+    [](const GemmArgs &args, const Requantize32 &) { const auto VL = sme::get_vector_length<int32_t>();
+                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_s8q_mopa_4VLx1VL, int8_t, int8_t>(args, qp); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_s8q_mopa_2VLx2VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && args._maxthreads == 1 && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
+    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_s8q_mopa_2VLx2VL, int8_t, int8_t>(args, qp); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
 GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8qa_mmla_4x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index be7a4ee..ba9649c 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -36,6 +36,14 @@
 #include "kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp"
 
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_gemv_u8qa_dot_16VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+
 #include "kernels/sve_hybrid_u8qa_dot_4x4VL.hpp"
 #include "kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp"
 #include "kernels/sve_hybrid_u8u32_dot_6x4VL.hpp"
@@ -43,11 +51,13 @@
 #include "kernels/sve_interleaved_u8u32_dot_8x3VL.hpp"
 #include "kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp"
 #include "kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
 
 #include "gemm_hybrid_indirect.hpp"
 #include "gemm_hybrid_quantized.hpp"
 #include "gemm_hybrid_quantized_inline.hpp"
 #include "gemm_interleaved.hpp"
+#include "gemv_pretransposed.hpp"
 #include "quantize_wrapper.hpp"
 
 namespace arm_gemm {
@@ -55,6 +65,39 @@
 static const GemmImplementation<uint8_t, uint8_t, Requantize32> gemm_quint8_methods[] =
 {
 #ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+// SME2 kernels
+{
+    GemmMethod::GEMM_HYBRID,
+    "sme2_gemv_u8qa_dot_16VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && quant_hybrid_asymmetric(qp) && args._Msize == 1 && !args._indirect_input && args._nbatches == 1;  },
+    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemvPretransposed<cls_sme2_gemv_u8qa_dot_16VL, uint8_t, uint8_t, Requantize32>(args, qp); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_u8q_mopa_1VLx4VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && args._maxthreads == 1 && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
+    [](const GemmArgs &args, const Requantize32 &) { const auto VL = sme::get_vector_length<uint32_t>();
+                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_u8q_mopa_1VLx4VL, uint8_t, uint8_t>(args, qp); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_u8q_mopa_4VLx1VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && args._maxthreads == 1 && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
+    [](const GemmArgs &args, const Requantize32 &) { const auto VL = sme::get_vector_length<int32_t>();
+                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_u8q_mopa_4VLx1VL, uint8_t, uint8_t>(args, qp); }
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "sme2_interleaved_nomerge_u8q_mopa_2VLx2VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && args._maxthreads == 1 && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
+    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_u8q_mopa_2VLx2VL, uint8_t, uint8_t>(args, qp); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
 GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_u8qa_mmla_4x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
index f0b4e5d..86b33d0 100644
--- a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,10 +41,10 @@
 template<typename OutputStage>
 class run_gemv_kernel {
 public:
-    template<typename strategy, typename To, typename Tr>
+    template<typename strategy, typename Tlo, typename Tro, typename Tr>
     static void run (
         const strategy &strat,
-        const To *A_ptr, const To *B_ptr, Tr *c_ptr,
+        const Tlo *A_ptr, const Tro *B_ptr, Tr *c_ptr,
         size_t N, size_t K,
         const Tr *bias, const Activation &act, bool Accumulate,
         const OutputStage &os, const int32_t *col_bias, unsigned int col_base
@@ -52,10 +52,10 @@
 };
 
 template<>
-template<typename strategy, typename To, typename Tr>
+template<typename strategy, typename Tlo, typename Tro, typename Tr>
 void run_gemv_kernel<Nothing>::run(
         const strategy &strat,
-        const To *A_ptr, const To *B_ptr, Tr *C_ptr,
+        const Tlo *A_ptr, const Tro *B_ptr, Tr *C_ptr,
         size_t N, size_t K,
         const Tr *bias, const Activation &act, bool Accumulate,
         const Nothing &, const int32_t *, unsigned int
@@ -65,10 +65,10 @@
 }
 
 template<>
-template<typename strategy, typename To, typename Tr>
+template<typename strategy, typename Tlo, typename Tro, typename Tr>
 void run_gemv_kernel<Requantize32>::run(
         const strategy &strat,
-        const To *A_ptr, const To *B_ptr, Tr *C_ptr,
+        const Tlo *A_ptr, const Tro *B_ptr, Tr *C_ptr,
         size_t N, size_t K,
         const Tr *, const Activation &, bool,
         const Requantize32 &qp, const int32_t *col_bias, unsigned int col_base
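
Splitting the single operand type of run_gemv_kernel into Tlo and Tro means the A and B pointers no longer have to share an element type; presumably this is what lets the pretransposed GEMV path drive mixed-precision strategies such as sme2_gemv_fp32bf16fp32_dot_16VL, whose B panel would be held in bf16 while A and C stay fp32. A minimal, purely illustrative sketch of the shape of such a call (the stand-in types and stub are not library code):

    #include <cstddef>
    #include <cstdint>

    using bf16_storage = uint16_t; // placeholder for arm_gemm's bfloat16

    // Stub with the same operand-type split as run_gemv_kernel<...>::run.
    template <typename Tlo, typename Tro, typename Tr>
    void run_gemv_like(const Tlo *A, const Tro *B, Tr *C, size_t N, size_t K)
    {
        (void)A; (void)B; (void)C; (void)N; (void)K; // a real strategy would call its kernel here
    }

    void example()
    {
        const float        *A = nullptr; // fp32 input vector
        const bf16_storage *B = nullptr; // pretransposed weights in bf16 storage
        float              *C = nullptr; // fp32 output
        run_gemv_like(A, B, C, /*N=*/64, /*K=*/128);
    }
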
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp
new file mode 100644
index 0000000..9d8eb22
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "sme_interleave1VL_bf16_bf16.hpp"
+#include "sme_interleave1VL_block2_bf16_bf16.hpp"
+#include "sme_interleave1VL_block4_s8_s8.hpp"
+#include "sme_interleave1VL_block4_u8_u8.hpp"
+#include "sme_interleave1VL_block4_s8_s8_summing.hpp"
+#include "sme_interleave1VL_block4_u8_u8_summing.hpp"
+#include "sme_interleave1VL_fp16_fp16.hpp"
+#include "sme_interleave1VL_fp32_fp32.hpp"
+#include "sme_interleave2VL_block2_bf16_bf16.hpp"
+#include "sme_interleave2VL_block2_fp16_fp16.hpp"
+#include "sme_interleave2VL_block4_s8_s8.hpp"
+#include "sme_interleave2VL_block4_s8_s8_summing.hpp"
+#include "sme_interleave2VL_block4_u8_u8.hpp"
+#include "sme_interleave2VL_block4_u8_u8_summing.hpp"
+#include "sme_interleave2VL_fp16_fp16.hpp"
+#include "sme_interleave2VL_bf16_bf16.hpp"
+#include "sme_interleave2VL_fp32_fp32.hpp"
+#include "sme_interleave4VL_block2_bf16_bf16.hpp"
+#include "sme_interleave4VL_block4_s8_s8.hpp"
+#include "sme_interleave4VL_block4_u8_u8.hpp"
+#include "sme_interleave4VL_block4_s8_s8_summing.hpp"
+#include "sme_interleave4VL_block4_u8_u8_summing.hpp"
+#include "sme_interleave4VL_fp32_fp32.hpp"
+
+#include "sme2_interleave1VL_block2_fp32_bf16.hpp"
+#include "sme2_interleave2VL_block2_fp32_bf16.hpp"
+#include "sme2_interleave4VL_block2_fp32_bf16.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
new file mode 100644
index 0000000..c6ff375
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 2, VLType::SME, false>(
+  bfloat16 * &out, const float * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x21, ALL, MUL #2\n"
+      "sub x27, %x[width], #0x1\n"
+      "cntw x20, ALL, MUL #2\n"
+      "sub x19, x21, #0x1\n"
+      "whilelt p10.s, XZR, %x[height]\n"
+      "add x27, x27, x20\n"
+      "ands x26, %x[width], x19\n"
+      "udiv x27, x27, x20\n"
+      "csel x26, x26, x21, NE\n"
+      "mov x25, #0x0\n"
+      "and x24, x27, #0x1\n"
+      "sub x27, x27, #0x1\n"
+      "add x26, x26, #0x1\n"
+      "mov x19, %x[width]\n"
+      "ptrue p0.b\n"
+      "mov x23, %x[outptr_raw]\n"
+      "mov x22, %x[row_offset]\n"
+      "cntw x21\n"
+      "lsr x27, x27, #0x1\n"
+      "lsr x26, x26, #0x1\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34731  // whilelt pn9.s, x25, x19, VLx2\n"
+      "mov x20, %x[in]\n"
+      "1:"  // Width loop: Preamble: Loop
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0x25306548  // psel p8.s, p9.s/Z, p10.s[w12]\n"
+      ".inst 0xa0164266  // ld1w { z6.s-z7.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
+      ".inst 0xc160e0c6  // bfcvt z6.h, { z6.s-z7.s }\n"
+      ".inst 0xc08000c0  // mova za0h.s[x12], p0/M, z6.s\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x21\n"
+      "blt 1b\n"
+      "incw x22, ALL, MUL #2\n"
+      "incw x25, ALL, MUL #2\n"
+      "cbz x27, 5f\n"
+      "2:"  // Width loop
+      "mov x19, %x[width]\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34731  // whilelt pn9.s, x25, x19, VLx2\n"
+      "mov x20, %x[in]\n"
+      "3:"  // Width loop: Odd: Loop
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0x25306548  // psel p8.s, p9.s/Z, p10.s[w12]\n"
+      ".inst 0xa016427e  // ld1w { z30.s-z31.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
+      ".inst 0xc160e3de  // bfcvt z30.h, { z30.s-z31.s }\n"
+      ".inst 0xc08003c8  // mova za2h.s[x12], p0/M, z30.s\n"
+      ".inst 0xc082800f  // mova z15.s, p0/M, za0v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x21\n"
+      "st1w { z15.s }, p0, [x23]\n"
+      "addvl x23, x23, #1\n"
+      "blt 3b\n"
+      "incw x25, ALL, MUL #2\n"
+      "mov x19, %x[width]\n"
+      "incw x22, ALL, MUL #2\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34731  // whilelt pn9.s, x25, x19, VLx2\n"
+      "mov x20, %x[in]\n"
+      "4:"  // Width loop: Even: Loop
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0x25306548  // psel p8.s, p9.s/Z, p10.s[w12]\n"
+      ".inst 0xa0164278  // ld1w { z24.s-z25.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
+      ".inst 0xc160e318  // bfcvt z24.h, { z24.s-z25.s }\n"
+      ".inst 0xc0800300  // mova za0h.s[x12], p0/M, z24.s\n"
+      ".inst 0xc0828110  // mova z16.s, p0/M, za2v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x21\n"
+      "st1w { z16.s }, p0, [x23]\n"
+      "addvl x23, x23, #1\n"
+      "blt 4b\n"
+      "subs x27, x27, #0x1\n"
+      "incw x22, ALL, MUL #2\n"
+      "incw x25, ALL, MUL #2\n"
+      "bgt 2b\n"
+      "5:"  // Width loop: Tails
+      "cbnz x24, 8f\n"
+      "mov x19, %x[width]\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34731  // whilelt pn9.s, x25, x19, VLx2\n"
+      "mov x20, %x[in]\n"
+      "6:"  // Width loop: Tails: Even: Odd: Loop
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0x25306548  // psel p8.s, p9.s/Z, p10.s[w12]\n"
+      ".inst 0xa016426e  // ld1w { z14.s-z15.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
+      ".inst 0xc160e1ce  // bfcvt z14.h, { z14.s-z15.s }\n"
+      ".inst 0xc08001c8  // mova za2h.s[x12], p0/M, z14.s\n"
+      ".inst 0xc0828010  // mova z16.s, p0/M, za0v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x21\n"
+      "st1w { z16.s }, p0, [x23]\n"
+      "addvl x23, x23, #1\n"
+      "blt 6b\n"
+      "mov x12, #0x0\n"
+      "7:"  // Width loop: Tails: Even: Even: Loop
+      ".inst 0xc0828110  // mova z16.s, p0/M, za2v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x26\n"
+      "st1w { z16.s }, p0, [x23]\n"
+      "addvl x23, x23, #1\n"
+      "blt 7b\n"
+      "b 10f\n"
+      "8:"  // Width loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "9:"  // Width loop: Tails: Odd: Loop
+      ".inst 0xc0828010  // mova z16.s, p0/M, za0v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x26\n"
+      "st1w { z16.s }, p0, [x23]\n"
+      "addvl x23, x23, #1\n"
+      "blt 9b\n"
+      "10:"  // End
+      "mov %x[outptr_raw], x23\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [outptr_raw] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
new file mode 100644
index 0000000..e712eca
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 2, VLType::SME, false>(
+  bfloat16 * &out, const float * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x21, ALL, MUL #2\n"
+      "cntw x28\n"
+      "sub x27, %x[width], #0x1\n"
+      "cntw x20, ALL, MUL #2\n"
+      "sub x19, x21, #0x1\n"
+      ".inst 0x25207815  // ptrue pn13.b\n"
+      "whilelt p12.s, XZR, %x[height]\n"
+      "whilelt p11.s, x28, %x[height]\n"
+      "add x27, x27, x20\n"
+      "ands x26, %x[width], x19\n"
+      "udiv x27, x27, x20\n"
+      "csel x26, x26, x21, NE\n"
+      "mov x25, #0x0\n"
+      "and x24, x27, #0x1\n"
+      "sub x27, x27, #0x1\n"
+      "add x26, x26, #0x1\n"
+      "mov x19, %x[width]\n"
+      "mov x23, %x[in]\n"
+      "ptrue p0.b\n"
+      "mov x22, %x[outptr_raw]\n"
+      "mov x21, %x[row_offset]\n"
+      "lsr x27, x27, #0x1\n"
+      "lsr x26, x26, #0x1\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34732  // whilelt pn10.s, x25, x19, VLx2\n"
+      "add x20, x23, x28, LSL #3\n"
+      "1:"  // Width loop: Preamble: Loop
+      "ldr x19, [x23], #0x8\n"
+      ".inst 0x25306989  // psel p9.s, p10.s/Z, p12.s[w12]\n"
+      ".inst 0x25306968  // psel p8.s, p10.s/Z, p11.s[w12]\n"
+      ".inst 0xa0154678  // ld1w { z24.s-z25.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0xa0154276  // ld1w { z22.s-z23.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e318  // bfcvt z24.h, { z24.s-z25.s }\n"
+      ".inst 0xc160e2d6  // bfcvt z22.h, { z22.s-z23.s }\n"
+      ".inst 0xc0800300  // mova za0h.s[x12], p0/M, z24.s\n"
+      ".inst 0xc08002c4  // mova za1h.s[x12], p0/M, z22.s\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x28\n"
+      "blt 1b\n"
+      "incw x21, ALL, MUL #2\n"
+      "incw x25, ALL, MUL #2\n"
+      "cbz x27, 5f\n"
+      "2:"  // Width loop
+      "mov x19, %x[width]\n"
+      "mov x23, %x[in]\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34732  // whilelt pn10.s, x25, x19, VLx2\n"
+      "add x20, x23, x28, LSL #3\n"
+      "3:"  // Width loop: Odd: Loop
+      "ldr x19, [x23], #0x8\n"
+      ".inst 0x25306989  // psel p9.s, p10.s/Z, p12.s[w12]\n"
+      ".inst 0x25306968  // psel p8.s, p10.s/Z, p11.s[w12]\n"
+      ".inst 0xa0154676  // ld1w { z22.s-z23.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0xa015426a  // ld1w { z10.s-z11.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e2d6  // bfcvt z22.h, { z22.s-z23.s }\n"
+      ".inst 0xc160e14a  // bfcvt z10.h, { z10.s-z11.s }\n"
+      ".inst 0xc08002c8  // mova za2h.s[x12], p0/M, z22.s\n"
+      ".inst 0xc080014c  // mova za3h.s[x12], p0/M, z10.s\n"
+      ".inst 0xc0828008  // mova z8.s, p0/M, za0v.s[x12]\n"
+      ".inst 0xc0828089  // mova z9.s, p0/M, za1v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x28\n"
+      ".inst 0xa06056c8  // st1w { z8.s-z9.s }, pn13.b, [x22]\n"
+      "addvl x22, x22, #2\n"
+      "blt 3b\n"
+      "incw x25, ALL, MUL #2\n"
+      "mov x19, %x[width]\n"
+      "mov x23, %x[in]\n"
+      "incw x21, ALL, MUL #2\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34732  // whilelt pn10.s, x25, x19, VLx2\n"
+      "add x20, x23, x28, LSL #3\n"
+      "4:"  // Width loop: Even: Loop
+      "ldr x19, [x23], #0x8\n"
+      ".inst 0x25306989  // psel p9.s, p10.s/Z, p12.s[w12]\n"
+      ".inst 0x25306968  // psel p8.s, p10.s/Z, p11.s[w12]\n"
+      ".inst 0xa015467a  // ld1w { z26.s-z27.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0xa015427e  // ld1w { z30.s-z31.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e35a  // bfcvt z26.h, { z26.s-z27.s }\n"
+      ".inst 0xc160e3de  // bfcvt z30.h, { z30.s-z31.s }\n"
+      ".inst 0xc0800340  // mova za0h.s[x12], p0/M, z26.s\n"
+      ".inst 0xc08003c4  // mova za1h.s[x12], p0/M, z30.s\n"
+      ".inst 0xc0828106  // mova z6.s, p0/M, za2v.s[x12]\n"
+      ".inst 0xc082818e  // mova z14.s, p0/M, za3v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x28\n"
+      ".inst 0xa16056c6  // st1w { z6.s, z14.s }, pn13.b, [x22]\n"
+      "addvl x22, x22, #2\n"
+      "blt 4b\n"
+      "subs x27, x27, #0x1\n"
+      "incw x21, ALL, MUL #2\n"
+      "incw x25, ALL, MUL #2\n"
+      "bgt 2b\n"
+      "5:"  // Width loop: Tails
+      "cbnz x24, 8f\n"
+      "mov x19, %x[width]\n"
+      "mov x23, %x[in]\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34732  // whilelt pn10.s, x25, x19, VLx2\n"
+      "add x20, x23, x28, LSL #3\n"
+      "6:"  // Width loop: Tails: Even: Odd: Loop
+      "ldr x19, [x23], #0x8\n"
+      ".inst 0x25306989  // psel p9.s, p10.s/Z, p12.s[w12]\n"
+      ".inst 0x25306968  // psel p8.s, p10.s/Z, p11.s[w12]\n"
+      ".inst 0xa015466c  // ld1w { z12.s-z13.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0xa015426e  // ld1w { z14.s-z15.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e18c  // bfcvt z12.h, { z12.s-z13.s }\n"
+      ".inst 0xc160e1ce  // bfcvt z14.h, { z14.s-z15.s }\n"
+      ".inst 0xc0800188  // mova za2h.s[x12], p0/M, z12.s\n"
+      ".inst 0xc08001cc  // mova za3h.s[x12], p0/M, z14.s\n"
+      ".inst 0xc0828007  // mova z7.s, p0/M, za0v.s[x12]\n"
+      ".inst 0xc082808f  // mova z15.s, p0/M, za1v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x28\n"
+      ".inst 0xa16056c7  // st1w { z7.s, z15.s }, pn13.b, [x22]\n"
+      "addvl x22, x22, #2\n"
+      "blt 6b\n"
+      "mov x12, #0x0\n"
+      "7:"  // Width loop: Tails: Even: Even: Loop
+      ".inst 0xc082810e  // mova z14.s, p0/M, za2v.s[x12]\n"
+      ".inst 0xc082818f  // mova z15.s, p0/M, za3v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x26\n"
+      ".inst 0xa06056ce  // st1w { z14.s-z15.s }, pn13.b, [x22]\n"
+      "addvl x22, x22, #2\n"
+      "blt 7b\n"
+      "b 10f\n"
+      "8:"  // Width loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "9:"  // Width loop: Tails: Odd: Loop
+      ".inst 0xc0828014  // mova z20.s, p0/M, za0v.s[x12]\n"
+      ".inst 0xc0828095  // mova z21.s, p0/M, za1v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x26\n"
+      ".inst 0xa06056d4  // st1w { z20.s-z21.s }, pn13.b, [x22]\n"
+      "addvl x22, x22, #2\n"
+      "blt 9b\n"
+      "10:"  // End
+      "mov %x[outptr_raw], x22\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [outptr_raw] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
new file mode 100644
index 0000000..e08d6d9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 2, VLType::SME, false>(
+  bfloat16 * &out, const float * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x22, ALL, MUL #2\n"
+      "cntw x9\n"
+      "cntw x21, ALL, MUL #2\n"
+      "cntw x19, ALL, MUL #3\n"
+      "sub x20, x22, #0x1\n"
+      ".inst 0x25207817  // ptrue pn15.b\n"
+      "whilelt p1.s, XZR, %x[height]\n"
+      "whilelt p14.s, x9, %x[height]\n"
+      "whilelt p13.s, x21, %x[height]\n"
+      "whilelt p12.s, x19, %x[height]\n"
+      "sub x28, %x[width], #0x1\n"
+      "cntw x19, ALL, MUL #2\n"
+      "ands x27, %x[width], x20\n"
+      "mov x26, %x[in]\n"
+      "add x28, x28, x19\n"
+      "csel x27, x27, x22, NE\n"
+      "add x25, x26, x9, LSL #3\n"
+      "mov x24, #0x0\n"
+      "udiv x28, x28, x19\n"
+      "add x27, x27, #0x1\n"
+      "mov x19, %x[width]\n"
+      "add x23, x25, x9, LSL #3\n"
+      "ptrue p0.b\n"
+      "mov x22, %x[outptr_raw]\n"
+      "mov x21, %x[row_offset]\n"
+      "sub x28, x28, #0x1\n"
+      "lsr x27, x27, #0x1\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34713  // whilelt pn11.s, x24, x19, VLx2\n"
+      "add x20, x23, x9, LSL #3\n"
+      "1:"  // Width loop: Preamble: Loop
+      "ldr x19, [x26], #0x8\n"
+      ".inst 0x25306c28  // psel p8.s, p11.s/Z, p1.s[w12]\n"
+      ".inst 0x25306dca  // psel p10.s, p11.s/Z, p14.s[w12]\n"
+      ".inst 0xa0154278  // ld1w { z24.s-z25.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x25], #0x8\n"
+      ".inst 0x25306da9  // psel p9.s, p11.s/Z, p13.s[w12]\n"
+      ".inst 0x25306d88  // psel p8.s, p11.s/Z, p12.s[w12]\n"
+      ".inst 0xa0154a62  // ld1w { z2.s-z3.s }, pn10.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x23], #0x8\n"
+      ".inst 0xa015466a  // ld1w { z10.s-z11.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e318  // bfcvt z24.h, { z24.s-z25.s }\n"
+      ".inst 0xc160e042  // bfcvt z2.h, { z2.s-z3.s }\n"
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0xa015426c  // ld1w { z12.s-z13.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e14a  // bfcvt z10.h, { z10.s-z11.s }\n"
+      ".inst 0xc160e18c  // bfcvt z12.h, { z12.s-z13.s }\n"
+      ".inst 0xc0800300  // mova za0h.s[x12], p0/M, z24.s\n"
+      ".inst 0xc0800044  // mova za1h.s[x12], p0/M, z2.s\n"
+      ".inst 0xc0800148  // mova za2h.s[x12], p0/M, z10.s\n"
+      ".inst 0xc080018c  // mova za3h.s[x12], p0/M, z12.s\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "blt 1b\n"
+      "incw x21, ALL, MUL #2\n"
+      "incw x24, ALL, MUL #2\n"
+      "cbz x28, 5f\n"
+      "2:"  // Width loop
+      "mov x12, #0x0\n"
+      "3:"  // Width loop: Store: Loop
+      ".inst 0xc0828011  // mova z17.s, p0/M, za0v.s[x12]\n"
+      ".inst 0xc0828095  // mova z21.s, p0/M, za1v.s[x12]\n"
+      ".inst 0xc0828119  // mova z25.s, p0/M, za2v.s[x12]\n"
+      ".inst 0xc082819d  // mova z29.s, p0/M, za3v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      ".inst 0xa160ded1  // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x22]\n"
+      "addvl x22, x22, #4\n"
+      "blt 3b\n"
+      "mov x26, %x[in]\n"
+      "add x25, x26, x9, LSL #3\n"
+      "mov x19, %x[width]\n"
+      "add x23, x25, x9, LSL #3\n"
+      "mov x12, #0x0\n"
+      ".inst 0x25b34713  // whilelt pn11.s, x24, x19, VLx2\n"
+      "add x20, x23, x9, LSL #3\n"
+      "4:"  // Width loop: Load: Loop
+      "ldr x19, [x26], #0x8\n"
+      ".inst 0x25306c28  // psel p8.s, p11.s/Z, p1.s[w12]\n"
+      ".inst 0x25306dca  // psel p10.s, p11.s/Z, p14.s[w12]\n"
+      ".inst 0xa015426c  // ld1w { z12.s-z13.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x25], #0x8\n"
+      ".inst 0x25306da9  // psel p9.s, p11.s/Z, p13.s[w12]\n"
+      ".inst 0x25306d88  // psel p8.s, p11.s/Z, p12.s[w12]\n"
+      ".inst 0xa0154a6e  // ld1w { z14.s-z15.s }, pn10.s/Z, [x19, x21, LSL #2]\n"
+      "ldr x19, [x23], #0x8\n"
+      ".inst 0xa0154672  // ld1w { z18.s-z19.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e18c  // bfcvt z12.h, { z12.s-z13.s }\n"
+      ".inst 0xc160e1ce  // bfcvt z14.h, { z14.s-z15.s }\n"
+      "ldr x19, [x20], #0x8\n"
+      ".inst 0xa015427e  // ld1w { z30.s-z31.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+      ".inst 0xc160e252  // bfcvt z18.h, { z18.s-z19.s }\n"
+      ".inst 0xc160e3de  // bfcvt z30.h, { z30.s-z31.s }\n"
+      ".inst 0xc0800180  // mova za0h.s[x12], p0/M, z12.s\n"
+      ".inst 0xc08001c4  // mova za1h.s[x12], p0/M, z14.s\n"
+      ".inst 0xc0800248  // mova za2h.s[x12], p0/M, z18.s\n"
+      ".inst 0xc08003cc  // mova za3h.s[x12], p0/M, z30.s\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "blt 4b\n"
+      "subs x28, x28, #0x1\n"
+      "incw x21, ALL, MUL #2\n"
+      "incw x24, ALL, MUL #2\n"
+      "bgt 2b\n"
+      "5:"  // Width loop: Tails
+      "mov x12, #0x0\n"
+      "6:"  // Width loop: Tails: Loop
+      ".inst 0xc0828011  // mova z17.s, p0/M, za0v.s[x12]\n"
+      ".inst 0xc0828095  // mova z21.s, p0/M, za1v.s[x12]\n"
+      ".inst 0xc0828119  // mova z25.s, p0/M, za2v.s[x12]\n"
+      ".inst 0xc082819d  // mova z29.s, p0/M, za3v.s[x12]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      ".inst 0xa160ded1  // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x22]\n"
+      "addvl x22, x22, #4\n"
+      "blt 6b\n"
+      "7:"  // End
+      "mov %x[outptr_raw], x22\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [outptr_raw] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
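
For reference, the BFCVT instructions in the kernel above narrow pairs of fp32 vectors into a single bf16 vector before the values are moved into ZA. A minimal C++ sketch of the per-element idea follows (illustrative only, not part of the patch; the hardware instruction applies round-to-nearest-even, while this sketch shows plain truncation of the fp32 bit pattern):

    #include <cstdint>
    #include <cstring>

    // bf16 keeps the sign, exponent and top 7 mantissa bits of an IEEE-754 fp32.
    // Truncation sketch only; BFCVT itself rounds to nearest even.
    static inline uint16_t fp32_to_bf16_truncate(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // reinterpret the fp32 bit pattern
        return static_cast<uint16_t>(bits >> 16); // keep the top 16 bits
    }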
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
new file mode 100644
index 0000000..3c8c707
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 1, VLType::SME, false>(
+  bfloat16 * &out, const bfloat16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov x20, %x[width]\n"
+      "inch x20\n"
+      "cnth x10\n"
+      "sub x20, x20, #0x1\n"
+      "udiv x20, x20, x10\n"  // n_passes = ceildiv(width, VL<T>)
+      "mov x19, %x[width]\n"
+      "sub x9, x10, #0x1\n"
+      "sub x28, x20, #0x1\n"
+      "ands x9, x19, x9\n"
+      "sub x27, x10, #0x2\n"
+      "lsl x19, %x[height], #0x1\n"  // height * 2
+      "mov x26, #0x0\n"
+      "mov x25, %x[in]\n"
+      "lsr x28, x28, #0x1\n"  // n_loops = (n_passes - 1) / 2
+      "ldr x24, [x25, #0x0]\n"
+      "and x23, x20, #0x1\n"  // odd_tail = bool(n_passes & 0x1)
+      "csel x9, x9, x10, NE\n"
+      "ldr x22, [x25, #0x8]\n"
+      "ptrue p11.h\n"
+      "whilelt p10.h, XZR, x19\n"
+      "mov x21, %x[row_offset]\n"
+      "mov x20, %x[out]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "add x25, x25, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386140  // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0xe05502c1  // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386140  // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe05502c1  // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      "inch x21\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "inch x26\n"
+      "cbz x28, 8f\n"
+      "mov x19, x28\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550308  // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c9  // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      ".inst 0xe06a8281  // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550308  // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c9  // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "inch x26\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe06a8281  // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "addvl x20, x20, #2\n"
+      "inch x21\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c1  // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8288  // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      ".inst 0xe06a8289  // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c1  // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8288  // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "subs x19, x19, #0x1\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe06a8289  // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "addvl x20, x20, #2\n"
+      "inch x26\n"
+      "inch x21\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x23, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550308  // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "add x25, x25, #0x8\n"
+      "addvl x20, x20, #1\n"
+      "blt 9b\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe07f8288  // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "blt 10b\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x20\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
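
The scalar setup of this kernel (and of the other interleave kernels below) mirrors the arithmetic spelled out in its own comments: n_passes = ceildiv(width, VL<T>), n_loops = (n_passes - 1) / 2 and odd_tail = bool(n_passes & 0x1). A small C++ sketch of that bookkeeping, assuming width > 0 and with vl standing in for the vector length in elements (CNTH above):

    #include <cstddef>

    struct KLoopCounts
    {
        size_t n_passes; // ceildiv(width, VL<T>)
        size_t n_loops;  // (n_passes - 1) / 2 : double-buffered main-loop iterations
        bool   odd_tail; // bool(n_passes & 0x1) : one extra tail pass remains
    };

    static inline KLoopCounts compute_k_loop_counts(size_t width, size_t vl)
    {
        KLoopCounts c;
        c.n_passes = (width + vl - 1) / vl;        // same as (width + vl - 1) udiv vl
        c.n_loops  = (c.n_passes - 1) / 2;
        c.odd_tail = (c.n_passes & 0x1) != 0;
        return c;
    }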
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
new file mode 100644
index 0000000..81b346c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 2, VLType::SME, false>(
+  bfloat16 * &out, const bfloat16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cnth x20\n"
+      "cntw x10\n"
+      "mov x19, %x[width]\n"
+      "inch x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x9, x19, #0x1\n"
+      "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x27, x20, #0x1\n"
+      "ands x27, x19, x27\n"
+      "csel x27, x27, x20, NE\n"
+      "add x27, x27, #0x1\n"
+      "lsr x27, x27, #0x1\n"
+      "sub x26, x10, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x19, %x[height], #0x1\n" // height * 2
+      "whilelt p10.h, XZR, x19\n"
+      "mov x25, %x[row_offset]\n"
+      "mov x24, %x[out]\n"
+      "mov x23, #0x0\n"
+      "whilelt p9.h, x23, %x[width]\n"
+      "whilelt p8.h, x23, %x[width]\n"
+      "mov x22, %x[in]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25286140  // dup p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe05902a0  // ld1h { za0h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+      ".inst 0x25686140  // dup p0.h, p8.h/Z, p10.h[w12, #2]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0590282  // ld1h { za0h.h[x12, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x26, LSL #1\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25286140  // dup p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe05902a0  // ld1h { za0h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+      ".inst 0x25686140  // dup p0.h, p8.h/Z, p10.h[w12, #2]\n"
+      "mov x22, %x[in]\n"
+      ".inst 0xe0590282  // ld1h { za0h.h[x12, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "inch x25\n"
+      "inch x23\n"
+      "cbz x9, 8f\n"
+      "mov x19, x9\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.h, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25396140  // dup p0.h, p8.h/Z, p10.h[w13, #1]\n"
+      ".inst 0xe05922a1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x21, x25, LSL #1]\n"
+      ".inst 0x25796140  // dup p0.h, p8.h/Z, p10.h[w13, #3]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0592283  // ld1h { za0h.h[x13, #3] }, p0/Z, [x20, x25, LSL #1]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x4\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25396140  // dup p0.h, p8.h/Z, p10.h[w13, #1]\n"
+      ".inst 0xe05922a1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x21, x25, LSL #1]\n"
+      ".inst 0x25796140  // dup p0.h, p8.h/Z, p10.h[w13, #3]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0592283  // ld1h { za0h.h[x13, #3] }, p0/Z, [x20, x25, LSL #1]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.h, x23, %x[width]\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "inch x23\n"
+      "inch x25\n"
+      "whilelt p8.h, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25296140  // dup p0.h, p8.h/Z, p10.h[w13]\n"
+      ".inst 0xe05922a0  // ld1h { za0h.h[x13] }, p0/Z, [x21, x25, LSL #1]\n"
+      ".inst 0x25696140  // dup p0.h, p8.h/Z, p10.h[w13, #2]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0592282  // ld1h { za0h.h[x13, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x4\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25296140  // dup p0.h, p8.h/Z, p10.h[w13]\n"
+      ".inst 0xe05922a0  // ld1h { za0h.h[x13] }, p0/Z, [x21, x25, LSL #1]\n"
+      ".inst 0x25696140  // dup p0.h, p8.h/Z, p10.h[w13, #2]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0592282  // ld1h { za0h.h[x13, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.h, x23, %x[width]\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "inch x23\n"
+      "inch x25\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x28, 11f\n"
+      "mov x22, %x[in]\n"
+      "whilelt p8.h, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25396140  // dup p0.h, p8.h/Z, p10.h[w13, #1]\n"
+      "addvl x24, x24, #1\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe05922a1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x21, x25, LSL #1]\n"
+      "add x22, x22, #0x8\n"
+      "add x13, x13, #0x2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 9b\n"
+      "whilelt p9.h, x23, %x[width]\n"
+      "whilelt p8.h, x23, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x19, x19, #0x2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      "blt 10b\n"
+      "whilelt p9.h, x23, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x24\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
new file mode 100644
index 0000000..bee3cc5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 4, VLType::SME, false>(
+  int8_t * &out, const int8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntb x20\n"
+      "mov x22, %x[width]\n"
+      "incb x22\n"
+      "mov x19, %x[width]\n"
+      "sub x9, x20, #0x1\n"
+      "cntw x28\n"
+      "sub x22, x22, #0x1\n"
+      "ands x9, x19, x9\n"
+      "udiv x22, x22, x20\n"  // n_passes = ceildiv(width, VL<T>)
+      "csel x9, x9, x20, NE\n"
+      "lsl x21, %x[height], #0x1\n"  // height * 2
+      "lsl x20, x28, #0x1\n"
+      "sub x19, x22, #0x1\n"
+      "add x9, x9, #0x3\n"
+      "sub x27, x28, #0x2\n"
+      "whilelt p9.b, XZR, x21\n"
+      "whilelt p8.b, x20, x21\n"
+      "mov x26, #0x0\n"
+      "mov x25, %x[in]\n"
+      "lsr x19, x19, #0x1\n"  // n_loops = (n_passes - 1) / 2
+      "ldr x24, [x25, #0x0]\n"
+      "and x23, x22, #0x1\n"  // odd_tail = bool(n_passes & 0x1)
+      "lsr x9, x9, #0x2\n"
+      "ldr x22, [x25, #0x8]\n"
+      "ptrue p11.s\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x21, %x[row_offset]\n"
+      "mov x20, %x[out]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "add x25, x25, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe0150300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25646140  // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0xe01502c4  // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x27, LSL #2\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe0150300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25646140  // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe01502c4  // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
+      "ldr x24, [x25, #0x0]\n"
+      "incb x21\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "incb x26\n"
+      "cbz x19, 8f\n"
+      "mov x19, x19\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0152302  // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25756141  // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0bc8281  // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "add x13, x13, #0x8\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      ".inst 0x25356140  // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0152302  // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25756141  // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "incb x26\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe0bc8281  // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "addvl x20, x20, #2\n"
+      "incb x21\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // psel p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe0152300  // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25656141  // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8288  // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0bc8289  // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "add x13, x13, #0x8\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      ".inst 0x25256140  // psel p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe0152300  // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25656141  // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8288  // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "subs x19, x19, #0x1\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe0bc8289  // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "addvl x20, x20, #2\n"
+      "incb x26\n"
+      "incb x21\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x23, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      "ldr x24, [x25, #0x0]\n"
+      "add x12, x12, #0x1\n"
+      ".inst 0x25356140  // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      "cmp x12, x28\n"
+      ".inst 0xe0152302  // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+      "add x25, x25, #0x8\n"
+      "addvl x20, x20, #1\n"
+      "add x13, x13, #0x4\n"
+      "blt 9b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8288  // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "add x19, x19, #0x4\n"
+      "blt 10b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x20\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
new file mode 100644
index 0000000..3ba1b98
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 4, VLType::SME, true>(
+  int8_t * &out, const int8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov z18.b, #0x1\n"
+      "mov z17.s, #0x0\n"
+      "cntb x20\n"
+      "cntw x10\n"
+      "ptrue p1.b\n"
+      "mov x19, %x[width]\n"
+      "incb x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x9, x19, #0x1\n"
+      "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x27, x20, #0x1\n"
+      "ands x27, x19, x27\n"
+      "csel x27, x27, x20, NE\n"
+      "add x27, x27, #0x3\n"
+      "lsr x27, x27, #0x2\n"
+      "sub x26, x10, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x10, #0x1\n"
+      "whilelt p9.b, XZR, x20\n"
+      "whilelt p8.b, x19, x20\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x25, %x[row_offset]\n"
+      "mov x24, %x[out]\n"
+      "mov x23, #0x0\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "cbnz %x[first], 1f\n"
+      "addvl x24, x24, #-1\n"
+      "ld1w { z17.s }, p1/Z, [x24]\n"
+      "1:"  // K loop: Load row sums: End
+      "mov x22, %x[in]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 3f\n"
+      "2:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01902a0  // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25646140  // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0190284  // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x26, LSL #2\n"
+      "blt 2b\n"
+      "3:"  // K loop: Charge: End
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01902a0  // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25646140  // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "mov x22, %x[in]\n"
+      ".inst 0xe0190284  // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "incb x25\n"
+      "incb x23\n"
+      "cbz x9, 9f\n"
+      "mov x19, x9\n"
+      "4:"  // K loop: Main loop
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 6f\n"
+      "5:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01922a2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25756140  // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192286  // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828430  // mova z16.s, p1/M, za0v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "blt 5b\n"
+      "6:"  // K loop: Main loop: First: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01922a2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25756140  // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192286  // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828430  // mova z16.s, p1/M, za0v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "incb x23\n"
+      "incb x25\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 8f\n"
+      "7:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01922a0  // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25656140  // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192284  // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828510  // mova z16.s, p1/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828530  // mova z16.s, p1/M, za2v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "blt 7b\n"
+      "8:"  // K loop: Main loop: Second: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01922a0  // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25656140  // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192284  // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828510  // mova z16.s, p1/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828530  // mova z16.s, p1/M, za2v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "incb x23\n"
+      "incb x25\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 4b\n"
+      "9:"  // K loop: Tails
+      "cbnz x28, 12f\n"
+      "mov x22, %x[in]\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: First
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      "addvl x24, x24, #1\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe01922a2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "add x22, x22, #0x8\n"
+      "add x13, x13, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "11:"  // K loop: Tails: Even: Second
+      ".inst 0xc0828510  // mova z16.s, p1/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x19, x19, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "cmp x12, x27\n"
+      "blt 11b\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      "b 14f\n"
+      "12:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "13:"  // K loop: Tails: Odd: Loop
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      "sdot z17.s, z16.b, z18.b\n"
+      "blt 13b\n"
+      "14:"  // K loop: End
+      "st1w { z17.s }, p1, [x24]\n"
+      "addvl x24, x24, #1\n"
+      "mov %x[out], x24\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
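
The "_summing" variants above maintain the per-row sums needed for quantized GEMM by issuing SDOT/UDOT against a vector of ones (z18.b is set to #0x1), loading the previous sums when first is false and storing the accumulator after the last block. Per 32-bit lane that dot product is simply "add the four int8 values of this block to the accumulator"; a minimal C++ sketch of that reduction (illustrative only):

    #include <cstddef>
    #include <cstdint>

    // SDOT with a ones vector reduces each group of four int8 values into the
    // corresponding 32-bit accumulator lane, i.e. a plain widening sum.
    static inline int32_t accumulate_block4_sum(int32_t acc, const int8_t block[4])
    {
        for (size_t i = 0; i < 4; ++i)
        {
            acc += static_cast<int32_t>(block[i]);
        }
        return acc;
    }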
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
new file mode 100644
index 0000000..881dfe1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 4, VLType::SME, false>(
+  uint8_t * &out, const uint8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntb x20\n"
+      "mov x22, %x[width]\n"
+      "incb x22\n"
+      "mov x19, %x[width]\n"
+      "sub x9, x20, #0x1\n"
+      "cntw x28\n"
+      "sub x22, x22, #0x1\n"
+      "ands x9, x19, x9\n"
+      "udiv x22, x22, x20\n"  // n_passes = ceildiv(width, VL<T>)
+      "csel x9, x9, x20, NE\n"
+      "lsl x21, %x[height], #0x1\n"  // height * 2
+      "lsl x20, x28, #0x1\n"
+      "sub x19, x22, #0x1\n"
+      "add x9, x9, #0x3\n"
+      "sub x27, x28, #0x2\n"
+      "whilelt p9.b, XZR, x21\n"
+      "whilelt p8.b, x20, x21\n"
+      "mov x26, #0x0\n"
+      "mov x25, %x[in]\n"
+      "lsr x19, x19, #0x1\n"  // n_loops = (n_passes - 1) / 2
+      "ldr x24, [x25, #0x0]\n"
+      "and x23, x22, #0x1\n"  // odd_tail = bool(n_passes & 0x1)
+      "lsr x9, x9, #0x2\n"
+      "ldr x22, [x25, #0x8]\n"
+      "ptrue p11.s\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x21, %x[row_offset]\n"
+      "mov x20, %x[out]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "add x25, x25, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe0150300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25646140  // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0xe01502c4  // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x27, LSL #2\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe0150300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25646140  // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe01502c4  // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
+      "ldr x24, [x25, #0x0]\n"
+      "incb x21\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "incb x26\n"
+      "cbz x19, 8f\n"
+      "mov x19, x19\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0152302  // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25756141  // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0bc8281  // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "add x13, x13, #0x8\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      ".inst 0x25356140  // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0152302  // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25756141  // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "incb x26\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe0bc8281  // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "addvl x20, x20, #2\n"
+      "incb x21\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // psel p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe0152300  // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
+      ".inst 0x25656141  // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8288  // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0bc8289  // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "add x13, x13, #0x8\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      ".inst 0x25256140  // psel p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe0152300  // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25656141  // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe01526c4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe0bf8288  // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "subs x19, x19, #0x1\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe0bc8289  // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "addvl x20, x20, #2\n"
+      "incb x26\n"
+      "incb x21\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x23, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      "ldr x24, [x25, #0x0]\n"
+      "add x12, x12, #0x1\n"
+      ".inst 0x25356140  // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      "cmp x12, x28\n"
+      ".inst 0xe0152302  // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+      "add x25, x25, #0x8\n"
+      "addvl x20, x20, #1\n"
+      "add x13, x13, #0x4\n"
+      "blt 9b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8288  // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "add x19, x19, #0x4\n"
+      "blt 10b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8280  // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x20\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
new file mode 100644
index 0000000..231d7ae
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 4, VLType::SME, true>(
+  uint8_t * &out, const uint8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov z18.b, #0x1\n"
+      "mov z17.s, #0x0\n"
+      "cntb x20\n"
+      "cntw x10\n"
+      "ptrue p1.b\n"
+      "mov x19, %x[width]\n"
+      "incb x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x9, x19, #0x1\n"
+      "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x27, x20, #0x1\n"
+      "ands x27, x19, x27\n"
+      "csel x27, x27, x20, NE\n"
+      "add x27, x27, #0x3\n"
+      "lsr x27, x27, #0x2\n"
+      "sub x26, x10, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x10, #0x1\n"
+      "whilelt p9.b, XZR, x20\n"
+      "whilelt p8.b, x19, x20\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x25, %x[row_offset]\n"
+      "mov x24, %x[out]\n"
+      "mov x23, #0x0\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "cbnz %x[first], 1f\n"
+      "addvl x24, x24, #-1\n"
+      "ld1w { z17.s }, p1/Z, [x24]\n"
+      "1:"  // K loop: Load row sums: End
+      "mov x22, %x[in]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 3f\n"
+      "2:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01902a0  // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25646140  // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0190284  // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x26, LSL #2\n"
+      "blt 2b\n"
+      "3:"  // K loop: Charge: End
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01902a0  // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25646140  // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+      "mov x22, %x[in]\n"
+      ".inst 0xe0190284  // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "incb x25\n"
+      "incb x23\n"
+      "cbz x9, 9f\n"
+      "mov x19, x9\n"
+      "4:"  // K loop: Main loop
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 6f\n"
+      "5:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01922a2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25756140  // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192286  // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "udot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828430  // mova z16.s, p1/M, za0v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "blt 5b\n"
+      "6:"  // K loop: Main loop: First: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01922a2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25756140  // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192286  // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "udot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828430  // mova z16.s, p1/M, za0v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "incb x23\n"
+      "incb x25\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 8f\n"
+      "7:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01922a0  // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25656140  // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192284  // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828510  // mova z16.s, p1/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "udot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828530  // mova z16.s, p1/M, za2v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "blt 7b\n"
+      "8:"  // K loop: Main loop: Second: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01922a0  // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+      ".inst 0x25656140  // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0192284  // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc0828510  // mova z16.s, p1/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "udot z17.s, z16.b, z18.b\n"
+      ".inst 0xc0828530  // mova z16.s, p1/M, za2v.s[x12, #1]\n"
+      "addvl x24, x24, #2\n"
+      "incb x23\n"
+      "incb x25\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 4b\n"
+      "9:"  // K loop: Tails
+      "cbnz x28, 12f\n"
+      "mov x22, %x[in]\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: First
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      "addvl x24, x24, #1\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe01922a2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "add x22, x22, #0x8\n"
+      "add x13, x13, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      "whilelt p8.b, x23, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "11:"  // K loop: Tails: Even: Second
+      ".inst 0xc0828510  // mova z16.s, p1/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x19, x19, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "cmp x12, x27\n"
+      "blt 11b\n"
+      "whilelt p9.b, x23, %x[width]\n"
+      "b 14f\n"
+      "12:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "13:"  // K loop: Tails: Odd: Loop
+      ".inst 0xc0828410  // mova z16.s, p1/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      "udot z17.s, z16.b, z18.b\n"
+      "blt 13b\n"
+      "14:"  // K loop: End
+      "st1w { z17.s }, p1, [x24]\n"
+      "addvl x24, x24, #1\n"
+      "mov %x[out], x24\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
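The "_summing" variant above produces per-row sums alongside the interleaved data: z18.b is preset to 1, each vector written out of ZA is also copied into z16 with MOVA, and "udot z17.s, z16.b, z18.b" adds the sum of every group of four bytes to a running 32-bit accumulator, which is stored after the K loop (and reloaded first when `first` is false). A minimal scalar sketch of that reduction, illustrative only and not part of the patch, with made-up names:

#include <cstddef>
#include <cstdint>

// With the multiplier vector fixed to 1, UDOT degenerates to adding the plain
// sum of each 4-byte group into the matching 32-bit accumulator lane.
static inline void accumulate_block4_row_sums(const uint8_t *interleaved, // one vector's worth of interleaved data
                                              size_t         lanes,       // number of 32-bit lanes per vector
                                              int32_t       *row_sums)    // running sums, one per lane
{
    for (size_t lane = 0; lane < lanes; lane++)
    {
        int32_t acc = 0;
        for (int b = 0; b < 4; b++)
        {
            acc += interleaved[lane * 4 + b]; // udot z17.s, z16.b, z18.b with z18.b == 1
        }
        row_sums[lane] += acc;
    }
}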
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
new file mode 100644
index 0000000..f80ca64
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 1, VLType::SME, false>(
+  __fp16 * &out, const __fp16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov x20, %x[width]\n"
+      "inch x20\n"
+      "cnth x10\n"
+      "sub x20, x20, #0x1\n"
+      "udiv x20, x20, x10\n"  // n_passes = ceildiv(width, VL<T>)
+      "mov x19, %x[width]\n"
+      "sub x9, x10, #0x1\n"
+      "sub x28, x20, #0x1\n"
+      "ands x9, x19, x9\n"
+      "sub x27, x10, #0x2\n"
+      "lsl x19, %x[height], #0x1\n"  // height * 2
+      "mov x26, #0x0\n"
+      "mov x25, %x[in]\n"
+      "lsr x28, x28, #0x1\n"  // n_loops = (n_passes - 1) / 2
+      "ldr x24, [x25, #0x0]\n"
+      "and x23, x20, #0x1\n"  // odd_tail = bool(n_passes & 0x1)
+      "csel x9, x9, x10, NE\n"
+      "ldr x22, [x25, #0x8]\n"
+      "ptrue p11.h\n"
+      "whilelt p10.h, XZR, x19\n"
+      "mov x21, %x[row_offset]\n"
+      "mov x20, %x[out]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "add x25, x25, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386140  // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0xe05502c1  // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386140  // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe05502c1  // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      "inch x21\n"
+      "ldr x22, [x25, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "inch x26\n"
+      "cbz x28, 8f\n"
+      "mov x19, x28\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550308  // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c9  // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      ".inst 0xe06a8281  // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550308  // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c9  // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "inch x26\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe06a8281  // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "addvl x20, x20, #2\n"
+      "inch x21\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x27, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c1  // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8288  // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      ".inst 0xe06a8289  // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27\n"
+      "add x25, x25, #0x10\n"
+      "addvl x20, x20, #2\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550300  // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      "mov x25, %x[in]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25386141  // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe05506c1  // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+      "ldr x22, [x25, #0x8]\n"
+      ".inst 0xe07f8288  // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      ".inst 0x25386d20  // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "subs x19, x19, #0x1\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xe06a8289  // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+      "addvl x20, x20, #2\n"
+      "inch x26\n"
+      "inch x21\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x23, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      "ldr x24, [x25, #0x0]\n"
+      ".inst 0x25286140  // psel p0.h, p8.h/Z, p10.h[w12]\n"
+      ".inst 0xe0550308  // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "add x25, x25, #0x8\n"
+      "addvl x20, x20, #1\n"
+      "blt 9b\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe07f8288  // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "blt 10b\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25286d20  // psel p0.h, p11.h/Z, p9.h[w12]\n"
+      ".inst 0xe07f8280  // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x9\n"
+      "addvl x20, x20, #1\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x20\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
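The scalar comments attached to the setup code above (n_passes, n_loops, odd_tail, plus the csel'd remainder) describe how the K loop is scheduled: passes over the ZA tile run in "First"/"Second" pairs, with at most one unpaired tail pass. A small sketch of that arithmetic, assuming vl is the element count returned by cnth/cntw for the data type in question (names are illustrative, not from the kernel):

#include <cstddef>

struct KLoopCounts
{
    size_t n_passes;   // ceildiv(width, vl)
    size_t n_loops;    // (n_passes - 1) / 2, i.e. paired First/Second iterations
    bool   odd_tail;   // n_passes & 1, i.e. one unpaired pass remains
    size_t tail_elems; // elements covered by the final partial vector
};

static inline KLoopCounts compute_k_loop_counts(size_t width, size_t vl)
{
    KLoopCounts c{};
    c.n_passes   = (width + vl - 1) / vl;                  // "inc...; sub #1; udiv"
    c.n_loops    = (c.n_passes - 1) / 2;                   // "sub #1; lsr #1"
    c.odd_tail   = (c.n_passes & 1) != 0;                  // "and ..., #0x1"
    c.tail_elems = (width % vl) != 0 ? (width % vl) : vl;  // "ands ...; csel ..., NE"
    return c;
}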
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
new file mode 100644
index 0000000..874fc79
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 1, VLType::SME, false>(
+  float * &out, const float * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x10\n"
+      "mov x19, %x[width]\n"
+      "incw x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x10\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x9, x19, #0x1\n"
+      "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x27, x10, #0x1\n"
+      "ands x27, x19, x27\n"
+      "csel x27, x27, x10, NE\n"
+      "sub x26, x10, #0x2\n"
+      "ptrue p11.s\n"
+      "whilelt p10.s, XZR, %x[height]\n"
+      "mov x25, %x[row_offset]\n"
+      "mov x24, %x[out]\n"
+      "mov x23, #0x0\n"
+      "whilelt p9.s, x23, %x[width]\n"
+      "whilelt p8.s, x23, %x[width]\n"
+      "mov x22, %x[in]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      ".inst 0xe09902a0  // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      ".inst 0x25706140  // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0990281  // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      ".inst 0xe09902a0  // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      ".inst 0x25706140  // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+      "mov x22, %x[in]\n"
+      ".inst 0xe0990281  // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+      "ldr x21, [x22, #0x0]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      "incw x25\n"
+      "incw x23\n"
+      "cbz x9, 8f\n"
+      "mov x19, x9\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.s, x23, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      ".inst 0xe09902a8  // ld1w { za2h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      ".inst 0x25706140  // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0990289  // ld1w { za2h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      ".inst 0xe09902a8  // ld1w { za2h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      ".inst 0x25706140  // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0990289  // ld1w { za2h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.s, x23, %x[width]\n"
+      ".inst 0xe0aa8301  // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "incw x23\n"
+      "incw x25\n"
+      "whilelt p8.s, x23, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x26, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      ".inst 0xe09902a0  // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      ".inst 0x25706140  // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0990281  // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x26\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x22, %x[in]\n"
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      ".inst 0xe09902a0  // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      ".inst 0x25706140  // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe0990281  // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+      "ldr x20, [x22, #0x8]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.s, x23, %x[width]\n"
+      ".inst 0xe0aa8309  // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+      "addvl x24, x24, #2\n"
+      "incw x23\n"
+      "incw x25\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x28, 11f\n"
+      "mov x22, %x[in]\n"
+      "whilelt p8.s, x23, %x[width]\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      ".inst 0x25306140  // dup p0.s, p8.s/Z, p10.s[w12]\n"
+      "addvl x24, x24, #1\n"
+      "ldr x21, [x22, #0x0]\n"
+      ".inst 0xe09902a8  // ld1w { za2h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+      "add x22, x22, #0x8\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 9b\n"
+      "whilelt p9.s, x23, %x[width]\n"
+      "whilelt p8.s, x23, %x[width]\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8308  // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      "blt 10b\n"
+      "whilelt p9.s, x23, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8300  // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+      "addvl x24, x24, #1\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x27\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x24\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
new file mode 100644
index 0000000..61fed43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 1, VLType::SME, false>(
+  bfloat16 * &out, const bfloat16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cnth x27\n"
+      "cmp %x[height], x27\n"
+      "cnth x26\n"
+      "csel x27, %x[height], x27, LT\n"
+      "mov x25, #0x0\n"
+      "ptrue p13.s\n"
+      "sub x27, x27, #0x1\n"
+      "whilelt p12.h, XZR, %x[height]\n"
+      "whilelt p11.h, x26, %x[height]\n"
+      "mov x24, %x[row_offset]\n"
+      "mov x23, %x[out]\n"
+      "whilelt p10.h, x25, %x[width]\n"
+      "whilelt p9.h, x25, %x[width]\n"
+      "whilelt p8.h, x25, %x[width]\n"
+      "1:"  // Width loop
+      "add x22, %x[in], XZR, LSL #3\n"
+      "add x19, %x[in], x26, LSL #3\n"
+      "ldr x21, [x22], #0x8\n"
+      "mov x12, #0x0\n"
+      "ldr x20, [x19], #0x8\n"
+      "cbz x27, 3f\n"
+      "2:"  // Loads: Loop
+      ".inst 0x25286581  // psel p1.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0x25286160  // psel p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe05806a0  // ld1h { za0h.h[x12] }, p1/Z, [x21, x24, LSL #1]\n"
+      "ldr x21, [x22], #0x8\n"
+      ".inst 0xe0580288  // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27, LSL #1\n"
+      "ldr x20, [x19], #0x8\n"
+      "blt 2b\n"
+      "3:"  // Loads: Tail
+      "sub x19, %x[width], x25\n"
+      ".inst 0x25286580  // psel p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe05802a0  // ld1h { za0h.h[x12] }, p0/Z, [x21, x24, LSL #1]\n"
+      ".inst 0x25286160  // psel p0.h, p8.h/Z, p11.h[w12]\n"
+      "cmp x19, x26\n"
+      ".inst 0xe0580288  // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
+      "mov x12, #0x0\n"
+      "csel x19, x19, x26, LT\n"
+      "4:"  // Stores: Loop
+      ".inst 0x25287540  // psel p0.h, p13.h/Z, p10.h[w12]\n"
+      ".inst 0xe07f82e0  // st1h { za0v.h[x12] }, p0/Z, [x23, XZR, LSL #1]\n"
+      ".inst 0x25287540  // psel p0.h, p13.h/Z, p10.h[w12]\n"
+      ".inst 0xe07a82e8  // st1h { za1v.h[x12] }, p0/Z, [x23, x26, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "addvl x23, x23, #4\n"
+      "blt 4b\n"
+      "inch x25\n"
+      "whilelt p10.h, x25, %x[width]\n"
+      "whilelt p9.h, x25, %x[width]\n"
+      "whilelt p8.h, x25, %x[width]\n"
+      "inch x24\n"
+      "b.any 1b\n"
+      "mov %x[out], x23\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
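In the 2VL interleave above the ZA array acts as an on-the-fly transpose buffer: each input row is loaded into a horizontal tile slice (za0h.h / za1h.h) and the output panel is written back from the vertical slices (za0v.h / za1v.h). A simplified scalar model of that pattern, offered only as an illustration; the real kernel also handles predication, the exact output stride, and two vector-lengths of rows per panel:

#include <cstddef>

// Consecutive output elements come from consecutive input rows, i.e. a
// tile-sized transpose: input rows fill horizontal slices, the output is
// drained from vertical slices.
template <typename T>
static void tile_transpose_interleave(const T *const *in, size_t row_offset,
                                      size_t rows, size_t cols, T *out)
{
    for (size_t c = 0; c < cols; c++)       // one vertical slice per output step
    {
        for (size_t r = 0; r < rows; r++)   // one horizontal slice per input row
        {
            *out++ = in[r][row_offset + c];
        }
    }
}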
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
new file mode 100644
index 0000000..fc7596e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 2, VLType::SME, false>(
+  bfloat16 * &out, const bfloat16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cnth x20\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "mov x19, %x[width]\n"
+      "inch x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x20, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x20, NE\n"
+      "add x10, x10, #0x1\n"
+      "lsr x10, x10, #0x1\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p13.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x16, #0x1\n"
+      "whilelt p12.h, XZR, x20\n"
+      "whilelt p11.h, x19, x20\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25286580  // dup p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe05c02e0  // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25286160  // dup p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe05c02c8  // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25686580  // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+      ".inst 0xe05c02a2  // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25686160  // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+      ".inst 0xe05c028a  // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x9, LSL #1\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25286580  // dup p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe05c02e0  // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25286160  // dup p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe05c02c8  // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25686580  // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+      ".inst 0xe05c02a2  // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25686160  // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+      ".inst 0xe05c028a  // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "inch x28\n"
+      "inch x26\n"
+      "cbz x13, 8f\n"
+      "mov x19, x13\n"
+      "3:"  // K loop: Main loop
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25396580  // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+      ".inst 0xe05c22e1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25396160  // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+      ".inst 0xe05c22c9  // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25796580  // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+      ".inst 0xe05c22a3  // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25796160  // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+      ".inst 0xe05c228b  // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "add x13, x13, #0x4\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25396580  // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+      ".inst 0xe05c22e1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25396160  // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+      ".inst 0xe05c22c9  // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25796580  // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+      ".inst 0xe05c22a3  // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25796160  // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+      ".inst 0xe05c228b  // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "inch x26\n"
+      "inch x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25296580  // dup p0.h, p9.h/Z, p12.h[w13]\n"
+      ".inst 0xe05c22e0  // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25296160  // dup p0.h, p8.h/Z, p11.h[w13]\n"
+      ".inst 0xe05c22c8  // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25696580  // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+      ".inst 0xe05c22a2  // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25696160  // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+      ".inst 0xe05c228a  // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "add x13, x13, #0x4\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25296580  // dup p0.h, p9.h/Z, p12.h[w13]\n"
+      ".inst 0xe05c22e0  // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25296160  // dup p0.h, p8.h/Z, p11.h[w13]\n"
+      ".inst 0xe05c22c8  // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25696580  // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+      ".inst 0xe05c22a2  // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25696160  // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+      ".inst 0xe05c228a  // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "inch x26\n"
+      "inch x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x11, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25396581  // dup p1.h, p9.h/Z, p12.h[w13, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25396160  // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+      "addvl x27, x27, #2\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe05c26e1  // ld1h { za0h.h[x13, #1] }, p1/Z, [x23, x28, LSL #1]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      ".inst 0xe05c22c9  // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "add x25, x25, #0x8\n"
+      "add x13, x13, #0x2\n"
+      "cmp x12, x16\n"
+      "blt 9b\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      "add x19, x19, #0x2\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
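In the block-2 variants the data is loaded as halfword slices (ld1h into za0h.h/za1h.h) but written back as word slices (st1w from za0v.s..za3v.s), so each 32-bit output lane carries a pair of 16-bit values, and the tail length is rounded up to whole pairs ("add #1; lsr #1" in the setup above). A minimal sketch of that tail computation, with illustrative names only:

#include <cstddef>

static inline size_t block2_tail_pairs(size_t width, size_t vl_elems)
{
    size_t rem = width % vl_elems;   // "ands x10, x19, x10"
    if (rem == 0)
    {
        rem = vl_elems;              // "csel x10, x10, x20, NE"
    }
    return (rem + 1) / 2;            // round the last partial vector up to element pairs
}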
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
new file mode 100644
index 0000000..67570a1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 2, VLType::SME, false>(
+  __fp16 * &out, const __fp16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cnth x20\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "mov x19, %x[width]\n"
+      "inch x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x20, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x20, NE\n"
+      "add x10, x10, #0x1\n"
+      "lsr x10, x10, #0x1\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p13.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x16, #0x1\n"
+      "whilelt p12.h, XZR, x20\n"
+      "whilelt p11.h, x19, x20\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25286580  // dup p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe05c02e0  // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25286160  // dup p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe05c02c8  // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25686580  // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+      ".inst 0xe05c02a2  // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25686160  // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+      ".inst 0xe05c028a  // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x9, LSL #1\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25286580  // dup p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe05c02e0  // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25286160  // dup p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe05c02c8  // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25686580  // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+      ".inst 0xe05c02a2  // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25686160  // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+      ".inst 0xe05c028a  // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "inch x28\n"
+      "inch x26\n"
+      "cbz x13, 8f\n"
+      "mov x19, x13\n"
+      "3:"  // K loop: Main loop
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25396580  // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+      ".inst 0xe05c22e1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25396160  // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+      ".inst 0xe05c22c9  // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25796580  // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+      ".inst 0xe05c22a3  // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25796160  // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+      ".inst 0xe05c228b  // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "add x13, x13, #0x4\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25396580  // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+      ".inst 0xe05c22e1  // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25396160  // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+      ".inst 0xe05c22c9  // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25796580  // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+      ".inst 0xe05c22a3  // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25796160  // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+      ".inst 0xe05c228b  // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "inch x26\n"
+      "inch x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25296580  // dup p0.h, p9.h/Z, p12.h[w13]\n"
+      ".inst 0xe05c22e0  // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25296160  // dup p0.h, p8.h/Z, p11.h[w13]\n"
+      ".inst 0xe05c22c8  // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25696580  // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+      ".inst 0xe05c22a2  // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25696160  // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+      ".inst 0xe05c228a  // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "add x13, x13, #0x4\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25296580  // dup p0.h, p9.h/Z, p12.h[w13]\n"
+      ".inst 0xe05c22e0  // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+      ".inst 0x25296160  // dup p0.h, p8.h/Z, p11.h[w13]\n"
+      ".inst 0xe05c22c8  // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+      ".inst 0x25696580  // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+      ".inst 0xe05c22a2  // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+      ".inst 0x25696160  // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+      ".inst 0xe05c228a  // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "inch x26\n"
+      "inch x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x11, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25396581  // dup p1.h, p9.h/Z, p12.h[w13, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25396160  // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+      "addvl x27, x27, #2\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe05c26e1  // ld1h { za0h.h[x13, #1] }, p1/Z, [x23, x28, LSL #1]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      ".inst 0xe05c22c9  // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "add x25, x25, #0x8\n"
+      "add x13, x13, #0x2\n"
+      "cmp x12, x16\n"
+      "blt 9b\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      "add x19, x19, #0x2\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
new file mode 100644
index 0000000..22f0933
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 4, VLType::SME, false>(
+  int8_t * &out, const int8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntb x20\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "mov x19, %x[width]\n"
+      "incb x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x20, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x20, NE\n"
+      "add x10, x10, #0x3\n"
+      "lsr x10, x10, #0x2\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x16, #0x1\n"
+      "whilelt p9.b, XZR, x20\n"
+      "whilelt p8.b, x19, x20\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x9, LSL #2\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "incb x28\n"
+      "incb x26\n"
+      "cbz x13, 8f\n"
+      "mov x19, x13\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "incb x26\n"
+      "incb x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "incb x26\n"
+      "incb x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x11, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "addvl x27, x27, #2\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26e2  // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      "add x25, x25, #0x8\n"
+      "add x13, x13, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x16\n"
+      "blt 9b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "add x19, x19, #0x4\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
new file mode 100644
index 0000000..81cde6c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 4, VLType::SME, true>(
+  int8_t * &out, const int8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov z20.b, #0x1\n"
+      "mov z19.s, #0x0\n"
+      "cntb x20\n"
+      "mov z18.s, #0x0\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "ptrue p2.b\n"
+      "mov x19, %x[width]\n"
+      "incb x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x20, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x20, NE\n"
+      "add x10, x10, #0x3\n"
+      "lsr x10, x10, #0x2\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x16, #0x1\n"
+      "whilelt p9.b, XZR, x20\n"
+      "whilelt p8.b, x19, x20\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "cbnz %x[first], 1f\n"
+      "addvl x27, x27, #-2\n"
+      "ld1w { z19.s }, p2/Z, [x27]\n"
+      "ld1w { z18.s }, p2/Z, [x27, #1, MUL VL]\n"
+      "1:"  // K loop: Load row sums: End
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 3f\n"
+      "2:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x9, LSL #2\n"
+      "blt 2b\n"
+      "3:"  // K loop: Charge: End
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "incb x28\n"
+      "incb x26\n"
+      "cbz x13, 9f\n"
+      "mov x19, x13\n"
+      "4:"  // K loop: Main loop
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 6f\n"
+      "5:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828831  // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08288b0  // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 5b\n"
+      "6:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828831  // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08288b0  // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "incb x26\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      "incb x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 8f\n"
+      "7:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828931  // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08289b0  // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 7b\n"
+      "8:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828931  // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08289b0  // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "incb x26\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      "incb x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 4b\n"
+      "9:"  // K loop: Tails
+      "cbnz x11, 12f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: First
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26e2  // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      "addvl x27, x27, #2\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      "add x25, x25, #0x8\n"
+      "add x13, x13, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x16\n"
+      "blt 10b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "11:"  // K loop: Tails: Even: Second
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "add x19, x19, #0x4\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 11b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "b 14f\n"
+      "12:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "13:"  // K loop: Tails: Odd: Loop
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "sdot z19.s, z17.b, z20.b\n"
+      "sdot z18.s, z16.b, z20.b\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 13b\n"
+      "14:"  // K loop: End
+      "st1w { z19.s }, p2, [x27]\n"
+      "st1w { z18.s }, p2, [x27, #1, MUL VL]\n"
+      "addvl x27, x27, #2\n"
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
new file mode 100644
index 0000000..cd4a766
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 4, VLType::SME, false>(
+  uint8_t * &out, const uint8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntb x20\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "mov x19, %x[width]\n"
+      "incb x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x20, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x20, NE\n"
+      "add x10, x10, #0x3\n"
+      "lsr x10, x10, #0x2\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x16, #0x1\n"
+      "whilelt p9.b, XZR, x20\n"
+      "whilelt p8.b, x19, x20\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x9, LSL #2\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "incb x28\n"
+      "incb x26\n"
+      "cbz x13, 8f\n"
+      "mov x19, x13\n"
+      "3:"  // K loop: Main loop
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "incb x26\n"
+      "incb x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "incb x26\n"
+      "incb x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x11, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "addvl x27, x27, #2\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26e2  // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      "add x25, x25, #0x8\n"
+      "add x13, x13, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x16\n"
+      "blt 9b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "add x19, x19, #0x4\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
new file mode 100644
index 0000000..5a71613
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 4, VLType::SME, true>(
+  uint8_t * &out, const uint8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov z20.b, #0x1\n"
+      "mov z19.s, #0x0\n"
+      "cntb x20\n"
+      "mov z18.s, #0x0\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "ptrue p2.b\n"
+      "mov x19, %x[width]\n"
+      "incb x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x20, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x20, NE\n"
+      "add x10, x10, #0x3\n"
+      "lsr x10, x10, #0x2\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p11.s\n"
+      "lsl x20, %x[height], #0x1\n" // height * 2
+      "lsl x19, x16, #0x1\n"
+      "whilelt p9.b, XZR, x20\n"
+      "whilelt p8.b, x19, x20\n"
+      "zip1 p10.b, p9.b, p8.b\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "cbnz %x[first], 1f\n"
+      "addvl x27, x27, #-2\n"
+      "ld1w { z19.s }, p2/Z, [x27]\n"
+      "ld1w { z18.s }, p2/Z, [x27, #1, MUL VL]\n"
+      "1:"  // K loop: Load row sums: End
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 3f\n"
+      "2:"  // K loop: Charge: Loop
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x8\n"
+      "cmp x12, x9, LSL #2\n"
+      "blt 2b\n"
+      "3:"  // K loop: Charge: End
+      ".inst 0x25246140  // dup p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c02e0  // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252c6140  // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0x25646141  // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+      ".inst 0xe01c02c1  // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256c6140  // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+      "mov x25, %x[in]\n"
+      ".inst 0xe01c06a4  // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c0285  // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "incb x28\n"
+      "incb x26\n"
+      "cbz x13, 9f\n"
+      "mov x19, x13\n"
+      "4:"  // K loop: Main loop
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 6f\n"
+      "5:"  // K loop: Main loop: First: Loop
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828831  // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+      "udot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08288b0  // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "udot z18.s, z16.b, z20.b\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 5b\n"
+      "6:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25356140  // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe01c22e2  // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      ".inst 0x25756141  // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      ".inst 0x257d6140  // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a6  // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2287  // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828831  // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+      "udot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08288b0  // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "incb x26\n"
+      "udot z18.s, z16.b, z20.b\n"
+      "incb x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 8f\n"
+      "7:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828931  // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+      "udot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08289b0  // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "add x13, x13, #0x8\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "udot z18.s, z16.b, z20.b\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 7b\n"
+      "8:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe01c22e0  // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25656141  // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+      ".inst 0xe01c22c1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+      ".inst 0x256d6140  // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26a4  // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      ".inst 0xe01c2285  // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      ".inst 0xc0828931  // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+      "udot z18.s, z16.b, z20.b\n"
+      ".inst 0xc08289b0  // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25706d21  // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+      ".inst 0x25706d20  // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "incb x26\n"
+      "udot z18.s, z16.b, z20.b\n"
+      "incb x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 4b\n"
+      "9:"  // K loop: Tails
+      "cbnz x11, 12f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x13, #0x0\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: First
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "udot z18.s, z16.b, z20.b\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe01c26e2  // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      "addvl x27, x27, #2\n"
+      ".inst 0xe01c22c3  // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+      "add x25, x25, #0x8\n"
+      "add x13, x13, #0x4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x16\n"
+      "blt 10b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "whilelt p8.b, x26, %x[width]\n"
+      "mov x19, #0x0\n"
+      "mov x12, #0x0\n"
+      "11:"  // K loop: Tails: Even: Second
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "add x19, x19, #0x4\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "udot z18.s, z16.b, z20.b\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 11b\n"
+      "whilelt p9.b, x26, %x[width]\n"
+      "b 14f\n"
+      "12:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "13:"  // K loop: Tails: Odd: Loop
+      ".inst 0xc0828811  // mova z17.s, p2/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0xc0828890  // mova z16.s, p2/M, za1v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "udot z19.s, z17.b, z20.b\n"
+      "udot z18.s, z16.b, z20.b\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 13b\n"
+      "14:"  // K loop: End
+      "st1w { z19.s }, p2, [x27]\n"
+      "st1w { z18.s }, p2, [x27, #1, MUL VL]\n"
+      "addvl x27, x27, #2\n"
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
new file mode 100644
index 0000000..3ea616f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 1, VLType::SME, false>(
+  __fp16 * &out, const __fp16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cnth x27\n"
+      "cmp %x[height], x27\n"
+      "cnth x26\n"
+      "csel x27, %x[height], x27, LT\n"
+      "mov x25, #0x0\n"
+      "ptrue p13.s\n"
+      "sub x27, x27, #0x1\n"
+      "whilelt p12.h, XZR, %x[height]\n"
+      "whilelt p11.h, x26, %x[height]\n"
+      "mov x24, %x[row_offset]\n"
+      "mov x23, %x[out]\n"
+      "whilelt p10.h, x25, %x[width]\n"
+      "whilelt p9.h, x25, %x[width]\n"
+      "whilelt p8.h, x25, %x[width]\n"
+      "1:"  // Width loop
+      "add x22, %x[in], XZR, LSL #3\n"
+      "add x19, %x[in], x26, LSL #3\n"
+      "ldr x21, [x22], #0x8\n"
+      "mov x12, #0x0\n"
+      "ldr x20, [x19], #0x8\n"
+      "cbz x27, 3f\n"
+      "2:"  // Loads: Loop
+      ".inst 0x25286581  // psel p1.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0x25286160  // psel p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe05806a0  // ld1h { za0h.h[x12] }, p1/Z, [x21, x24, LSL #1]\n"
+      "ldr x21, [x22], #0x8\n"
+      ".inst 0xe0580288  // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x27, LSL #1\n"
+      "ldr x20, [x19], #0x8\n"
+      "blt 2b\n"
+      "3:"  // Loads: Tail
+      "sub x19, %x[width], x25\n"
+      ".inst 0x25286580  // psel p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe05802a0  // ld1h { za0h.h[x12] }, p0/Z, [x21, x24, LSL #1]\n"
+      ".inst 0x25286160  // psel p0.h, p8.h/Z, p11.h[w12]\n"
+      "cmp x19, x26\n"
+      ".inst 0xe0580288  // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
+      "mov x12, #0x0\n"
+      "csel x19, x19, x26, LT\n"
+      "4:"  // Stores: Loop
+      ".inst 0x25287540  // psel p0.h, p13.h/Z, p10.h[w12]\n"
+      ".inst 0xe07f82e0  // st1h { za0v.h[x12] }, p0/Z, [x23, XZR, LSL #1]\n"
+      ".inst 0x25287540  // psel p0.h, p13.h/Z, p10.h[w12]\n"
+      ".inst 0xe07a82e8  // st1h { za1v.h[x12] }, p0/Z, [x23, x26, LSL #1]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "addvl x23, x23, #4\n"
+      "blt 4b\n"
+      "inch x25\n"
+      "whilelt p10.h, x25, %x[width]\n"
+      "whilelt p9.h, x25, %x[width]\n"
+      "whilelt p8.h, x25, %x[width]\n"
+      "inch x24\n"
+      "b.any 1b\n"
+      "mov %x[out], x23\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
new file mode 100644
index 0000000..d702542
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<2, 1, VLType::SME, false>(
+  float * &out, const float * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x16\n"
+      "cntw x15, ALL, MUL #2\n"
+      "cntw x14, ALL, MUL #3\n"
+      "mov x19, %x[width]\n"
+      "incw x19\n"
+      "sub x19, x19, #0x1\n"
+      "udiv x19, x19, x16\n" // n_passes = ceildiv(width, VL<T>)
+      "sub x13, x19, #0x1\n"
+      "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+      "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+      "mov x19, %x[width]\n"
+      "sub x10, x16, #0x1\n"
+      "ands x10, x19, x10\n"
+      "csel x10, x10, x16, NE\n"
+      "sub x9, x16, #0x2\n"
+      "ptrue p13.s\n"
+      "whilelt p12.s, XZR, %x[height]\n"
+      "whilelt p11.s, x16, %x[height]\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "mov x26, #0x0\n"
+      "whilelt p10.s, x26, %x[width]\n"
+      "whilelt p9.s, x26, %x[width]\n"
+      "whilelt p8.s, x26, %x[width]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 2f\n"
+      "1:"  // K loop: Charge: Loop
+      ".inst 0x25306580  // dup p0.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe09c02e0  // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      ".inst 0xe09c02c4  // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      ".inst 0x25706580  // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+      ".inst 0xe09c02a1  // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+      ".inst 0x25706160  // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+      ".inst 0xe09c0285  // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 1b\n"
+      "2:"  // K loop: Charge: End
+      ".inst 0x25306580  // dup p0.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe09c02e0  // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      ".inst 0xe09c02c4  // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      ".inst 0x25706580  // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+      ".inst 0xe09c02a1  // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+      ".inst 0x25706160  // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+      ".inst 0xe09c0285  // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      "incw x28\n"
+      "incw x26\n"
+      "cbz x13, 8f\n"
+      "mov x19, x13\n"
+      "3:"  // K loop: Main loop
+      "whilelt p9.s, x26, %x[width]\n"
+      "whilelt p8.s, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 5f\n"
+      "4:"  // K loop: Main loop: First: Loop
+      ".inst 0x25306580  // dup p0.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe09c02e8  // ld1w { za2h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      ".inst 0xe09c02cc  // ld1w { za3h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      ".inst 0x25706580  // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+      ".inst 0xe09c02a9  // ld1w { za2h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+      ".inst 0x25706160  // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+      ".inst 0xe09c028d  // ld1w { za3h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 4b\n"
+      "5:"  // K loop: Main loop: First: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25306580  // dup p0.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe09c02e8  // ld1w { za2h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      ".inst 0xe09c02cc  // ld1w { za3h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      ".inst 0x25706580  // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+      ".inst 0xe09c02a9  // ld1w { za2h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+      ".inst 0x25706160  // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+      ".inst 0xe09c028d  // ld1w { za3h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "whilelt p10.s, x26, %x[width]\n"
+      ".inst 0xe0af8761  // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "incw x26\n"
+      "incw x28\n"
+      ".inst 0xe0ae8365  // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "whilelt p9.s, x26, %x[width]\n"
+      "whilelt p8.s, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "cbz x9, 7f\n"
+      "6:"  // K loop: Main loop: Second: Loop
+      ".inst 0x25306580  // dup p0.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe09c02e0  // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      ".inst 0xe09c02c4  // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      ".inst 0x25706580  // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+      ".inst 0xe09c02a1  // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+      ".inst 0x25706160  // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+      ".inst 0xe09c0285  // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x9\n"
+      "blt 6b\n"
+      "7:"  // K loop: Main loop: Second: Tail
+      "mov x25, %x[in]\n"
+      "add x24, %x[in], x16, LSL #3\n"
+      ".inst 0x25306580  // dup p0.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe09c02e0  // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      ".inst 0xe09c02c4  // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      ".inst 0x25706580  // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+      ".inst 0xe09c02a1  // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+      ".inst 0x25706160  // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+      ".inst 0xe09c0285  // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+      "ldr x23, [x25, #0x0]\n"
+      "ldr x22, [x24, #0x0]\n"
+      "ldr x21, [x25, #0x8]\n"
+      "ldr x20, [x24, #0x8]\n"
+      "add x25, x25, #0x10\n"
+      "add x24, x24, #0x10\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25707541  // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25707540  // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+      "whilelt p10.s, x26, %x[width]\n"
+      ".inst 0xe0af8769  // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+      "incw x26\n"
+      "incw x28\n"
+      ".inst 0xe0ae836d  // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+      "addvl x27, x27, #4\n"
+      "subs x19, x19, #0x1\n"
+      "bgt 3b\n"
+      "8:"  // K loop: Tails
+      "cbnz x11, 11f\n"
+      "mov x25, %x[in]\n"
+      "whilelt p9.s, x26, %x[width]\n"
+      "whilelt p8.s, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "9:"  // K loop: Tails: Even: First
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25306581  // dup p1.s, p9.s/Z, p12.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      ".inst 0x25306160  // dup p0.s, p8.s/Z, p11.s[w12]\n"
+      "addvl x27, x27, #2\n"
+      "ldr x23, [x25, #0x0]\n"
+      ".inst 0xe09c06e8  // ld1w { za2h.s[x12] }, p1/Z, [x23, x28, LSL #2]\n"
+      "ldr x22, [x25, x16, LSL #0x3]\n"
+      ".inst 0xe09c02cc  // ld1w { za3h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x16\n"
+      "add x25, x25, #0x8\n"
+      "blt 9b\n"
+      "whilelt p10.s, x26, %x[width]\n"
+      "whilelt p9.s, x26, %x[width]\n"
+      "whilelt p8.s, x26, %x[width]\n"
+      "mov x12, #0x0\n"
+      "10:"  // K loop: Tails: Even: Second
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8368  // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0b0836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 10b\n"
+      "whilelt p10.s, x26, %x[width]\n"
+      "b 13f\n"
+      "11:"  // K loop: Tails: Odd
+      "mov x12, #0x0\n"
+      "12:"  // K loop: Tails: Odd: Loop
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0b08364  // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+      "addvl x27, x27, #2\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 12b\n"
+      "13:"  // K loop: End
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
new file mode 100644
index 0000000..556d148
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 2, VLType::SME, false>(
+  bfloat16 * &out, const bfloat16 * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x15\n"
+      "cntw x14, ALL, MUL #2\n"
+      "cntw x13, ALL, MUL #3\n"
+      "cnth x11\n"
+      "ptrue p13.s\n"
+      "cntw x10\n"
+      "cmp %x[height], x10\n"
+      "csel x10, %x[height], x10, LT\n"
+      "sub x10, x10, #0x1\n"
+      "whilelt p10.h, XZR, %x[height]\n"
+      "whilelt p9.h, x15, %x[height]\n"
+      "whilelt p8.h, x14, %x[height]\n"
+      "zip1 p12.h, p10.h, p8.h\n"
+      "whilelt p8.h, x13, %x[height]\n"
+      "zip1 p11.h, p9.h, p8.h\n"
+      "mov x9, %x[row_offset]\n"
+      "mov x28, %x[out]\n"
+      "mov x27, #0x0\n"
+      "whilelt p10.h, x27, %x[width]\n"
+      "whilelt p9.h, x27, %x[width]\n"
+      "whilelt p8.h, x27, %x[width]\n"
+      "1:"  // Width loop
+      "mov x12, #0x0\n"
+      "add x26, %x[in], XZR, LSL #3\n"
+      "add x25, %x[in], x15, LSL #3\n"
+      "add x24, %x[in], x14, LSL #3\n"
+      "add x23, %x[in], x13, LSL #3\n"
+      "ldr x22, [x26], #0x8\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x20, [x24], #0x8\n"
+      "ldr x19, [x23], #0x8\n"
+      "cbz x10, 3f\n"
+      "2:"  // Loads: Loop
+      ".inst 0x25286580  // dup p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe04902c0  // ld1h { za0h.h[x12] }, p0/Z, [x22, x9, LSL #1]\n"
+      ".inst 0x25286160  // dup p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe04902a8  // ld1h { za1h.h[x12] }, p0/Z, [x21, x9, LSL #1]\n"
+      ".inst 0x25386580  // dup p0.h, p9.h/Z, p12.h[w12, #1]\n"
+      ".inst 0xe0490281  // ld1h { za0h.h[x12, #1] }, p0/Z, [x20, x9, LSL #1]\n"
+      ".inst 0x25386160  // dup p0.h, p8.h/Z, p11.h[w12, #1]\n"
+      ".inst 0xe0490269  // ld1h { za1h.h[x12, #1] }, p0/Z, [x19, x9, LSL #1]\n"
+      "ldr x22, [x26], #0x8\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x20, [x24], #0x8\n"
+      "ldr x19, [x23], #0x8\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x10, LSL #1\n"
+      "blt 2b\n"
+      "3:"  // Loads: Tail
+      ".inst 0x25286580  // dup p0.h, p9.h/Z, p12.h[w12]\n"
+      ".inst 0xe04902c0  // ld1h { za0h.h[x12] }, p0/Z, [x22, x9, LSL #1]\n"
+      ".inst 0x25286160  // dup p0.h, p8.h/Z, p11.h[w12]\n"
+      ".inst 0xe04902a8  // ld1h { za1h.h[x12] }, p0/Z, [x21, x9, LSL #1]\n"
+      ".inst 0x25386580  // dup p0.h, p9.h/Z, p12.h[w12, #1]\n"
+      ".inst 0xe0490281  // ld1h { za0h.h[x12, #1] }, p0/Z, [x20, x9, LSL #1]\n"
+      ".inst 0x25386160  // dup p0.h, p8.h/Z, p11.h[w12, #1]\n"
+      ".inst 0xe0490269  // ld1h { za1h.h[x12, #1] }, p0/Z, [x19, x9, LSL #1]\n"
+      "mov x12, #0x0\n"
+      "sub x19, %x[width], x27\n"
+      "cmp x19, x11\n"
+      "csel x19, x19, x11, LT\n"
+      "add x19, x19, #0x1\n"
+      "lsr x19, x19, #0x1\n"
+      "4:"  // Stores: Loop
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0bf8380  // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0x25307541  // dup p1.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0af8384  // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
+      ".inst 0x25307540  // dup p0.s, p13.s/Z, p10.s[w12]\n"
+      ".inst 0xe0ae8788  // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
+      ".inst 0xe0ad838c  // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
+      "addvl x28, x28, #4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "blt 4b\n"
+      "inch x9\n"
+      "inch x27\n"
+      "whilelt p10.h, x27, %x[width]\n"
+      "whilelt p9.h, x27, %x[width]\n"
+      "whilelt p8.h, x27, %x[width]\n"
+      "b.any 1b\n"
+      "mov %x[out], x28\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
new file mode 100644
index 0000000..49d1aa5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 4, VLType::SME, false>(
+  int8_t * &out, const int8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x15\n"
+      "cntw x14\n"
+      "cntw x13, ALL, MUL #2\n"
+      "cntw x11, ALL, MUL #3\n"
+      "cmp %x[height], x15\n"
+      "csel x15, %x[height], x15, LT\n"
+      "whilelt p12.b, XZR, %x[height]\n"
+      "whilelt p10.b, x14, %x[height]\n"
+      "whilelt p9.b, x13, %x[height]\n"
+      "whilelt p8.b, x11, %x[height]\n"
+      "zip1 p12.b, p12.b, p9.b\n"
+      "zip1 p10.b, p10.b, p8.b\n"
+      "mov x10, #0x0\n"
+      "cntb x9\n"
+      "ptrue p11.s\n"
+      "sub x15, x15, #0x1\n"
+      "zip1 p10.b, p12.b, p10.b\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "whilelt p9.b, x10, %x[width]\n"
+      "whilelt p8.b, x10, %x[width]\n"
+      "1:"  // Width loop
+      "add x26, %x[in], XZR, LSL #3\n"
+      "add x25, %x[in], x14, LSL #3\n"
+      "ldr x24, [x26], #0x8\n"
+      "add x23, %x[in], x13, LSL #3\n"
+      "add x22, %x[in], x11, LSL #3\n"
+      "ldr x19, [x25], #0x8\n"
+      "mov x12, #0x0\n"
+      "ldr x21, [x23], #0x8\n"
+      "ldr x20, [x22], #0x8\n"
+      "cbz x15, 3f\n"
+      "2:"  // Loads: Loop
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c0300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
+      ".inst 0x252c6140  // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      "ldr x24, [x26], #0x8\n"
+      ".inst 0xe01c0261  // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
+      ".inst 0x25346141  // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
+      ".inst 0x253c6140  // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
+      "ldr x19, [x25], #0x8\n"
+      ".inst 0xe01c06a2  // ld1b { za0h.b[x12, #2] }, p1/Z, [x21, x28]\n"
+      "ldr x21, [x23], #0x8\n"
+      ".inst 0xe01c0283  // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x15, LSL #2\n"
+      "ldr x20, [x22], #0x8\n"
+      "blt 2b\n"
+      "3:"  // Loads: Tail
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c0300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
+      ".inst 0x252c6140  // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0xe01c0261  // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
+      ".inst 0x25346140  // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
+      "sub x19, %x[width], x10\n"
+      ".inst 0xe01c02a2  // ld1b { za0h.b[x12, #2] }, p0/Z, [x21, x28]\n"
+      "cmp x19, x9\n"
+      "csel x19, x19, x9, LT\n"
+      ".inst 0x253c6140  // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
+      "add x19, x19, #0x3\n"
+      ".inst 0xe01c0283  // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
+      "mov x12, #0x0\n"
+      "lsr x19, x19, #0x2\n"
+      "4:"  // Stores: Loop
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0ae8364  // st1w { za1v.s[x12] }, p0/Z, [x27, x14, LSL #2]\n"
+      ".inst 0x25306d21  // psel p1.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0ad8768  // st1w { za2v.s[x12] }, p1/Z, [x27, x13, LSL #2]\n"
+      ".inst 0xe0ab836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x11, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "addvl x27, x27, #4\n"
+      "blt 4b\n"
+      "incb x10\n"
+      "whilelt p9.b, x10, %x[width]\n"
+      "whilelt p8.b, x10, %x[width]\n"
+      "incb x28\n"
+      "b.any 1b\n"
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
new file mode 100644
index 0000000..94673d4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 4, VLType::SME, true>(
+  int8_t * &out, const int8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov z24.b, #0x1\n"
+      "mov z23.s, #0x0\n"
+      "ptrue p2.b\n"
+      "mov z22.s, #0x0\n"
+      "cntw x16\n"
+      "mov z21.s, #0x0\n"
+      "cntw x15, ALL, MUL #2\n"
+      "mov z20.s, #0x0\n"
+      "cntw x14, ALL, MUL #3\n"
+      "cntb x11\n"
+      "ptrue p11.s\n"
+      "cntw x10\n"
+      "cmp %x[height], x10\n"
+      "csel x10, %x[height], x10, LT\n"
+      "sub x10, x10, #0x1\n"
+      "whilelt p10.b, XZR, %x[height]\n"
+      "whilelt p9.b, x16, %x[height]\n"
+      "whilelt p8.b, x15, %x[height]\n"
+      "zip1 p10.b, p10.b, p8.b\n"
+      "whilelt p8.b, x14, %x[height]\n"
+      "zip1 p9.b, p9.b, p8.b\n"
+      "mov x9, %x[row_offset]\n"
+      "mov x28, %x[out]\n"
+      "zip1 p10.b, p10.b, p9.b\n"
+      "cbnz %x[first], 1f\n"
+      "addvl x28, x28, #-4\n"
+      "ld1w { z23.s }, p2/Z, [x28]\n"
+      "ld1w { z22.s }, p2/Z, [x28, #1, MUL VL]\n"
+      "ld1w { z21.s }, p2/Z, [x28, #2, MUL VL]\n"
+      "ld1w { z20.s }, p2/Z, [x28, #3, MUL VL]\n"
+      "1:"  // Initialise row sums: End
+      "mov x27, #0x0\n"
+      "whilelt p9.b, x27, %x[width]\n"
+      "whilelt p8.b, x27, %x[width]\n"
+      "2:"  // Width loop
+      "mov x13, #0x0\n"
+      "add x26, %x[in], XZR, LSL #3\n"
+      "add x25, %x[in], x16, LSL #3\n"
+      "add x24, %x[in], x15, LSL #3\n"
+      "add x23, %x[in], x14, LSL #3\n"
+      "ldr x22, [x26], #0x8\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x19, [x24], #0x8\n"
+      "ldr x20, [x23], #0x8\n"
+      "cbz x10, 4f\n"
+      "3:"  // Loads: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe00922c0  // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe00922a1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "ldr x22, [x26], #0x8\n"
+      ".inst 0xe0092662  // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x19, [x24], #0x8\n"
+      ".inst 0xe0092283  // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+      "ldr x20, [x23], #0x8\n"
+      "add x13, x13, #0x4\n"
+      "cmp x13, x10, LSL #2\n"
+      "blt 3b\n"
+      "4:"  // Loads: Tail
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe00922c0  // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe00922a1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "mov x12, #0x0\n"
+      ".inst 0xe0092662  // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+      "sub x19, %x[width], x27\n"
+      "cmp x19, x11\n"
+      ".inst 0xe0092283  // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+      "csel x19, x19, x11, LT\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "5:"  // Stores: Loop
+      ".inst 0xc0828813  // mova z19.s, p2/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8380  // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+      ".inst 0xc0828892  // mova z18.s, p2/M, za1v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0xe0b08384  // st1w { za1v.s[x12] }, p0/Z, [x28, x16, LSL #2]\n"
+      ".inst 0x25306d21  // dup p1.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      "sdot z23.s, z19.b, z24.b\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "sdot z22.s, z18.b, z24.b\n"
+      ".inst 0xe0af8788  // st1w { za2v.s[x12] }, p1/Z, [x28, x15, LSL #2]\n"
+      "sdot z21.s, z17.b, z24.b\n"
+      "sdot z20.s, z16.b, z24.b\n"
+      ".inst 0xe0ae838c  // st1w { za3v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
+      "addvl x28, x28, #4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "blt 5b\n"
+      "incb x9\n"
+      "incb x27\n"
+      "whilelt p9.b, x27, %x[width]\n"
+      "whilelt p8.b, x27, %x[width]\n"
+      "b.any 2b\n"
+      "st1w { z23.s }, p2, [x28]\n"
+      "st1w { z22.s }, p2, [x28, #1, MUL VL]\n"
+      "st1w { z21.s }, p2, [x28, #2, MUL VL]\n"
+      "st1w { z20.s }, p2, [x28, #3, MUL VL]\n"
+      "addvl x28, x28, #4\n"
+      "mov %x[out], x28\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
new file mode 100644
index 0000000..bbdaaa3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 4, VLType::SME, false>(
+  uint8_t * &out, const uint8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x15\n"
+      "cntw x14\n"
+      "cntw x13, ALL, MUL #2\n"
+      "cntw x11, ALL, MUL #3\n"
+      "cmp %x[height], x15\n"
+      "csel x15, %x[height], x15, LT\n"
+      "whilelt p12.b, XZR, %x[height]\n"
+      "whilelt p10.b, x14, %x[height]\n"
+      "whilelt p9.b, x13, %x[height]\n"
+      "whilelt p8.b, x11, %x[height]\n"
+      "zip1 p12.b, p12.b, p9.b\n"
+      "zip1 p10.b, p10.b, p8.b\n"
+      "mov x10, #0x0\n"
+      "cntb x9\n"
+      "ptrue p11.s\n"
+      "sub x15, x15, #0x1\n"
+      "zip1 p10.b, p12.b, p10.b\n"
+      "mov x28, %x[row_offset]\n"
+      "mov x27, %x[out]\n"
+      "whilelt p9.b, x10, %x[width]\n"
+      "whilelt p8.b, x10, %x[width]\n"
+      "1:"  // Width loop
+      "add x26, %x[in], XZR, LSL #3\n"
+      "add x25, %x[in], x14, LSL #3\n"
+      "ldr x24, [x26], #0x8\n"
+      "add x23, %x[in], x13, LSL #3\n"
+      "add x22, %x[in], x11, LSL #3\n"
+      "ldr x19, [x25], #0x8\n"
+      "mov x12, #0x0\n"
+      "ldr x21, [x23], #0x8\n"
+      "ldr x20, [x22], #0x8\n"
+      "cbz x15, 3f\n"
+      "2:"  // Loads: Loop
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c0300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
+      ".inst 0x252c6140  // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      "ldr x24, [x26], #0x8\n"
+      ".inst 0xe01c0261  // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
+      ".inst 0x25346141  // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
+      ".inst 0x253c6140  // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
+      "ldr x19, [x25], #0x8\n"
+      ".inst 0xe01c06a2  // ld1b { za0h.b[x12, #2] }, p1/Z, [x21, x28]\n"
+      "ldr x21, [x23], #0x8\n"
+      ".inst 0xe01c0283  // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x15, LSL #2\n"
+      "ldr x20, [x22], #0x8\n"
+      "blt 2b\n"
+      "3:"  // Loads: Tail
+      ".inst 0x25246140  // psel p0.b, p8.b/Z, p10.b[w12]\n"
+      ".inst 0xe01c0300  // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
+      ".inst 0x252c6140  // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
+      ".inst 0xe01c0261  // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
+      ".inst 0x25346140  // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
+      "sub x19, %x[width], x10\n"
+      ".inst 0xe01c02a2  // ld1b { za0h.b[x12, #2] }, p0/Z, [x21, x28]\n"
+      "cmp x19, x9\n"
+      "csel x19, x19, x9, LT\n"
+      ".inst 0x253c6140  // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
+      "add x19, x19, #0x3\n"
+      ".inst 0xe01c0283  // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
+      "mov x12, #0x0\n"
+      "lsr x19, x19, #0x2\n"
+      "4:"  // Stores: Loop
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8360  // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0ae8364  // st1w { za1v.s[x12] }, p0/Z, [x27, x14, LSL #2]\n"
+      ".inst 0x25306d21  // psel p1.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0x25306d20  // psel p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0ad8768  // st1w { za2v.s[x12] }, p1/Z, [x27, x13, LSL #2]\n"
+      ".inst 0xe0ab836c  // st1w { za3v.s[x12] }, p0/Z, [x27, x11, LSL #2]\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "addvl x27, x27, #4\n"
+      "blt 4b\n"
+      "incb x10\n"
+      "whilelt p9.b, x10, %x[width]\n"
+      "whilelt p8.b, x10, %x[width]\n"
+      "incb x28\n"
+      "b.any 1b\n"
+      "mov %x[out], x27\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
new file mode 100644
index 0000000..961008a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 4, VLType::SME, true>(
+  uint8_t * &out, const uint8_t * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "mov z24.b, #0x1\n"
+      "mov z23.s, #0x0\n"
+      "ptrue p2.b\n"
+      "mov z22.s, #0x0\n"
+      "cntw x16\n"
+      "mov z21.s, #0x0\n"
+      "cntw x15, ALL, MUL #2\n"
+      "mov z20.s, #0x0\n"
+      "cntw x14, ALL, MUL #3\n"
+      "cntb x11\n"
+      "ptrue p11.s\n"
+      "cntw x10\n"
+      "cmp %x[height], x10\n"
+      "csel x10, %x[height], x10, LT\n"
+      "sub x10, x10, #0x1\n"
+      "whilelt p10.b, XZR, %x[height]\n"
+      "whilelt p9.b, x16, %x[height]\n"
+      "whilelt p8.b, x15, %x[height]\n"
+      "zip1 p10.b, p10.b, p8.b\n"
+      "whilelt p8.b, x14, %x[height]\n"
+      "zip1 p9.b, p9.b, p8.b\n"
+      "mov x9, %x[row_offset]\n"
+      "mov x28, %x[out]\n"
+      "zip1 p10.b, p10.b, p9.b\n"
+      "cbnz %x[first], 1f\n"
+      "addvl x28, x28, #-4\n"
+      "ld1w { z23.s }, p2/Z, [x28]\n"
+      "ld1w { z22.s }, p2/Z, [x28, #1, MUL VL]\n"
+      "ld1w { z21.s }, p2/Z, [x28, #2, MUL VL]\n"
+      "ld1w { z20.s }, p2/Z, [x28, #3, MUL VL]\n"
+      "1:"  // Initialise row sums: End
+      "mov x27, #0x0\n"
+      "whilelt p9.b, x27, %x[width]\n"
+      "whilelt p8.b, x27, %x[width]\n"
+      "2:"  // Width loop
+      "mov x13, #0x0\n"
+      "add x26, %x[in], XZR, LSL #3\n"
+      "add x25, %x[in], x16, LSL #3\n"
+      "add x24, %x[in], x15, LSL #3\n"
+      "add x23, %x[in], x14, LSL #3\n"
+      "ldr x22, [x26], #0x8\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x19, [x24], #0x8\n"
+      "ldr x20, [x23], #0x8\n"
+      "cbz x10, 4f\n"
+      "3:"  // Loads: Loop
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe00922c0  // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe00922a1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "ldr x22, [x26], #0x8\n"
+      ".inst 0xe0092662  // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x19, [x24], #0x8\n"
+      ".inst 0xe0092283  // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+      "ldr x20, [x23], #0x8\n"
+      "add x13, x13, #0x4\n"
+      "cmp x13, x10, LSL #2\n"
+      "blt 3b\n"
+      "4:"  // Loads: Tail
+      ".inst 0x25256140  // dup p0.b, p8.b/Z, p10.b[w13]\n"
+      ".inst 0xe00922c0  // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+      ".inst 0x252d6140  // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+      ".inst 0x25356141  // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+      ".inst 0xe00922a1  // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+      ".inst 0x253d6140  // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+      "mov x12, #0x0\n"
+      ".inst 0xe0092662  // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+      "sub x19, %x[width], x27\n"
+      "cmp x19, x11\n"
+      ".inst 0xe0092283  // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+      "csel x19, x19, x11, LT\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "5:"  // Stores: Loop
+      ".inst 0xc0828813  // mova z19.s, p2/M, za0v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xe0bf8380  // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+      ".inst 0xc0828892  // mova z18.s, p2/M, za1v.s[x12]\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xc0828911  // mova z17.s, p2/M, za2v.s[x12]\n"
+      ".inst 0xe0b08384  // st1w { za1v.s[x12] }, p0/Z, [x28, x16, LSL #2]\n"
+      ".inst 0x25306d21  // dup p1.s, p11.s/Z, p9.s[w12]\n"
+      ".inst 0xc0828990  // mova z16.s, p2/M, za3v.s[x12]\n"
+      "udot z23.s, z19.b, z24.b\n"
+      ".inst 0x25306d20  // dup p0.s, p11.s/Z, p9.s[w12]\n"
+      "udot z22.s, z18.b, z24.b\n"
+      ".inst 0xe0af8788  // st1w { za2v.s[x12] }, p1/Z, [x28, x15, LSL #2]\n"
+      "udot z21.s, z17.b, z24.b\n"
+      "udot z20.s, z16.b, z24.b\n"
+      ".inst 0xe0ae838c  // st1w { za3v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
+      "addvl x28, x28, #4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "blt 5b\n"
+      "incb x9\n"
+      "incb x27\n"
+      "whilelt p9.b, x27, %x[width]\n"
+      "whilelt p8.b, x27, %x[width]\n"
+      "b.any 2b\n"
+      "st1w { z23.s }, p2, [x28]\n"
+      "st1w { z22.s }, p2, [x28, #1, MUL VL]\n"
+      "st1w { z21.s }, p2, [x28, #2, MUL VL]\n"
+      "st1w { z20.s }, p2, [x28, #3, MUL VL]\n"
+      "addvl x28, x28, #4\n"
+      "mov %x[out], x28\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
new file mode 100644
index 0000000..141ab00
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 1, VLType::SME, false>(
+  float * &out, const float * const *in,
+  size_t width, size_t height, size_t row_offset, bool first
+)
+{
+  ARM_COMPUTE_UNUSED(first);
+
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x14\n"
+      "cntw x13, ALL, MUL #2\n"
+      "cntw x11, ALL, MUL #3\n"
+      "ptrue p3.s\n"
+      "cntw x10\n"
+      "cmp %x[height], x10\n"
+      "csel x10, %x[height], x10, LT\n"
+      "sub x10, x10, #0x1\n"
+      "whilelt p2.s, XZR, %x[height]\n"
+      "whilelt p15.s, x14, %x[height]\n"
+      "whilelt p14.s, x13, %x[height]\n"
+      "whilelt p13.s, x11, %x[height]\n"
+      "mov x9, %x[row_offset]\n"
+      "mov x28, %x[out]\n"
+      "mov x27, #0x0\n"
+      "whilelt p12.s, x27, %x[width]\n"
+      "whilelt p11.s, x27, %x[width]\n"
+      "whilelt p10.s, x27, %x[width]\n"
+      "whilelt p9.s, x27, %x[width]\n"
+      "whilelt p8.s, x27, %x[width]\n"
+      "1:"  // Width loop
+      "mov x12, #0x0\n"
+      "add x26, %x[in], XZR, LSL #3\n"
+      "add x25, %x[in], x14, LSL #3\n"
+      "add x24, %x[in], x13, LSL #3\n"
+      "add x23, %x[in], x11, LSL #3\n"
+      "ldr x22, [x26], #0x8\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x20, [x24], #0x8\n"
+      "ldr x19, [x23], #0x8\n"
+      "cbz x10, 3f\n"
+      "2:"  // Loads: Loop
+      ".inst 0x25306c40  // dup p0.s, p11.s/Z, p2.s[w12]\n"
+      ".inst 0xe08902c0  // ld1w { za0h.s[x12] }, p0/Z, [x22, x9, LSL #2]\n"
+      ".inst 0x253069e0  // dup p0.s, p10.s/Z, p15.s[w12]\n"
+      ".inst 0xe08902a4  // ld1w { za1h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+      ".inst 0x253065c0  // dup p0.s, p9.s/Z, p14.s[w12]\n"
+      ".inst 0xe0890288  // ld1w { za2h.s[x12] }, p0/Z, [x20, x9, LSL #2]\n"
+      ".inst 0x253061a0  // dup p0.s, p8.s/Z, p13.s[w12]\n"
+      ".inst 0xe089026c  // ld1w { za3h.s[x12] }, p0/Z, [x19, x9, LSL #2]\n"
+      "ldr x22, [x26], #0x8\n"
+      "ldr x21, [x25], #0x8\n"
+      "ldr x20, [x24], #0x8\n"
+      "ldr x19, [x23], #0x8\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x10\n"
+      "blt 2b\n"
+      "3:"  // Loads: Tail
+      ".inst 0x25306c40  // dup p0.s, p11.s/Z, p2.s[w12]\n"
+      ".inst 0xe08902c0  // ld1w { za0h.s[x12] }, p0/Z, [x22, x9, LSL #2]\n"
+      ".inst 0x253069e0  // dup p0.s, p10.s/Z, p15.s[w12]\n"
+      ".inst 0xe08902a4  // ld1w { za1h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+      ".inst 0x253065c0  // dup p0.s, p9.s/Z, p14.s[w12]\n"
+      ".inst 0xe0890288  // ld1w { za2h.s[x12] }, p0/Z, [x20, x9, LSL #2]\n"
+      ".inst 0x253061a0  // dup p0.s, p8.s/Z, p13.s[w12]\n"
+      ".inst 0xe089026c  // ld1w { za3h.s[x12] }, p0/Z, [x19, x9, LSL #2]\n"
+      "mov x12, #0x0\n"
+      "sub x19, %x[width], x27\n"
+      "cmp x19, x14\n"
+      "csel x19, x19, x14, LT\n"
+      "4:"  // Stores: Loop
+      ".inst 0x25304d80  // dup p0.s, p3.s/Z, p12.s[w12]\n"
+      ".inst 0xe0bf8380  // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+      ".inst 0x25304d80  // dup p0.s, p3.s/Z, p12.s[w12]\n"
+      ".inst 0x25304d81  // dup p1.s, p3.s/Z, p12.s[w12]\n"
+      ".inst 0xe0ae8384  // st1w { za1v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
+      ".inst 0x25304d80  // dup p0.s, p3.s/Z, p12.s[w12]\n"
+      ".inst 0xe0ad8788  // st1w { za2v.s[x12] }, p1/Z, [x28, x13, LSL #2]\n"
+      ".inst 0xe0ab838c  // st1w { za3v.s[x12] }, p0/Z, [x28, x11, LSL #2]\n"
+      "addvl x28, x28, #4\n"
+      "add x12, x12, #0x1\n"
+      "cmp x12, x19\n"
+      "blt 4b\n"
+      "incw x9\n"
+      "incw x27\n"
+      "whilelt p12.s, x27, %x[width]\n"
+      "whilelt p11.s, x27, %x[width]\n"
+      "whilelt p10.s, x27, %x[width]\n"
+      "whilelt p9.s, x27, %x[width]\n"
+      "whilelt p8.s, x27, %x[width]\n"
+      "b.any 1b\n"
+      "mov %x[out], x28\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [out] "+&r" (out)
+      : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp
new file mode 100644
index 0000000..468915a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "asmlib.hpp"
+#include "convolution_parameters.hpp"
+#include "convolver.hpp"
+#include "interleave_indirect.hpp"
+#include "bfloat.hpp"
+
+#include <alloca.h>
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+#include <arm_neon.h>
+
+#include "utils.hpp"
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME
+namespace arm_gemm {
+
+#include "interleave_indirect_impl.hpp"
+
+#include "indirect-interleaves/list-sve.hpp"
+
+/**** Instantiate needed implementations ****/
+
+/* FP32: SME implementations (height 1VL, 2VL, 4VL) */
+template void IndirectInterleave<2, 1, VLType::SME>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 1, VLType::SME>(float *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 1, VLType::SME>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<1, 1, VLType::SME>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 1, VLType::SME>(float *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 1, VLType::SME>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<4, 1, VLType::SME>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<4, 1, VLType::SME>(float *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<4, 1, VLType::SME>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+/* BF16: SME implementations (height 1VL, 2VL, 4VL) */
+template void IndirectInterleave<2, 2, VLType::SME>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 2, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 2, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<1, 2, VLType::SME>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 2, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 2, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<4, 2, VLType::SME>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<4, 2, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<4, 2, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+/* BF16: SME implementations with narrow accumulators (no blocking) (height 1VL, 2VL) */
+template void IndirectInterleave<2, 1, VLType::SME>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 1, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 1, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<1, 1, VLType::SME>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 1, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 1, VLType::SME>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+/* FP16: SME implementations with narrow accumulators (no blocking) (height 1VL, 2VL) */
+template void IndirectInterleave<2, 1, VLType::SME>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 1, VLType::SME>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 1, VLType::SME>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<1, 1, VLType::SME>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 1, VLType::SME>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 1, VLType::SME>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+/* FP32 fast-mode: SME implementations */
+template void IndirectInterleave<1, 2, VLType::SME>(bfloat16 *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 2, VLType::SME>(bfloat16 *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 2, VLType::SME>(bfloat16 *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<2, 2, VLType::SME>(bfloat16 *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 2, VLType::SME>(bfloat16 *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 2, VLType::SME>(bfloat16 *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<4, 2, VLType::SME>(bfloat16 *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<4, 2, VLType::SME>(bfloat16 *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<4, 2, VLType::SME>(bfloat16 *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+/* INT8: SME implementations (height 1VL, 2VL, 4VL) */
+template void IndirectInterleave<1, 4, VLType::SME>(int8_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 4, VLType::SME>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 4, VLType::SME>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<2, 4, VLType::SME>(int8_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 4, VLType::SME>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 4, VLType::SME>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<4, 4, VLType::SME>(int8_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<4, 4, VLType::SME>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<4, 4, VLType::SME>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+/* UINT8: SME implementations (height 1VL, 2VL, 4VL) */
+template void IndirectInterleave<1, 4, VLType::SME>(uint8_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 4, VLType::SME>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 4, VLType::SME>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<2, 4, VLType::SME>(uint8_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 4, VLType::SME>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 4, VLType::SME>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<4, 4, VLType::SME>(uint8_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<4, 4, VLType::SME>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<4, 4, VLType::SME>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect_impl.hpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect_impl.hpp
new file mode 100644
index 0000000..4f25da2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect_impl.hpp
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+// Implementations of interleave functions
+// These must be included within a "namespace arm_gemm" block.
+
+/*
+ * Core function that does heavy lifting - interleave 'int_by' rows of width 'width' together.
+ *
+ * 'height' indicates the actual number of rows to interleave, so if it's less than int_by then the remaining
+ * entries are padded (note that this is "GEMM" padding rather than convolution padding, so there is no need to pad
+ * with a particular value).
+ *
+ * Note that it is not expected for this templated version to ever be used - all cases that matter should be
+ * explicitly specialized with an optimized implementation.
+ */
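+
+/*
+ * Illustrative sketch of the layout the generic loop below produces (example values only):
+ * with int_by = 4, block = 1, width = 3, height = 2 and row_offset = 0, given
+ * in[0] = {a0, a1, a2} and in[1] = {b0, b1, b2}, the output is
+ *
+ *   out = { a0, b0, 0, 0,   a1, b1, 0, 0,   a2, b2, 0, 0 }
+ *
+ * i.e. one 'block'-wide chunk per row of the 'int_by'-row group at each position, with the
+ * out-of-range rows 2 and 3 zero-padded.
+ */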
+template<unsigned int height_vectors, unsigned int block, VLType vlt, bool integrate_sums, typename TIn, typename TOut>
+void interleave_block( TOut * &out, const TIn * const *in, size_t width, size_t height, size_t row_offset, bool first) {
+    const unsigned int int_by = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block :
+                                                  (vlt == VLType::SME ? sme::get_vector_length<TOut>() / block : 1 ));
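+    // Worked example (illustrative numbers only): for an SME kernel with a 512-bit streaming
+    // vector length, uint8_t output and block = 4, sme::get_vector_length<uint8_t>() is 64,
+    // so a height_vectors = 4 kernel interleaves int_by = 4 * (64 / 4) = 64 rows per group.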
+
+    std::vector<int32_t> the_sums;
+
+    if (integrate_sums) {
+        the_sums = std::vector<int32_t>(int_by, 0);
+
+        if (!first) {
+            // In 'integrate sums' mode, we dump the sums at the end of the output on each pass.
+
+            // On the last pass this is correct, but on other passes it is not -
+            // so on the subsequent pass we need to take the output written by
+            // the previous pass as the starting point for the sums, and then
+            // overwrite them with new interleaved data.
+            int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
+
+            // Rewind pointer to where we wrote out the sums last time.
+            out_int32 -= int_by;
+
+            // Restore the running sums.
+            memcpy(the_sums.data(), out_int32, int_by * sizeof(int32_t));
+
+            // Update the "real" pointer so that the next output will clobber the old sums.
+            out = reinterpret_cast<TOut *>(out_int32);
+        }
+    }
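+
+    // Buffer layout sketch for the 'integrate sums' case (illustrative, int_by = 4): after the
+    // first pass the output holds [ block 0 | s0 s1 s2 s3 ]; each later pass rewinds over the
+    // sums, restores them, writes its block in their place and re-appends the updated sums,
+    // so the final layout is [ block 0 | block 1 | ... | s0 s1 s2 s3 ].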
+
+    for (unsigned int pos=0; pos<width; pos+=block) {
+        for (unsigned int row=0; row<int_by; row++) {
+            // Row out of range - pad 'block' entries.
+            if (row >= height) {
+                for (unsigned int col=0; col<block; col++) {
+                    *out++ = 0;
+                }
+                continue;
+            }
+
+            for (unsigned int col=0; col<block; col++) {
+                // Column out of range - pad a single entry
+                if (pos + col >= width) {
+                    *out++ = 0;
+                    continue;
+                }
+
+                if (integrate_sums) {
+                    the_sums[row] += in[row][row_offset + pos + col];
+                }
+
+                *out++ = in[row][row_offset + pos + col];
+            }
+        }
+    }
+
+    if (integrate_sums) {
+        int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
+
+        memcpy(out_int32, the_sums.data(), int_by * sizeof(int32_t));
+
+        out = reinterpret_cast<TOut *>(out_int32 + int_by);
+    }
+}
+
+template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TOut>
+inline void FixupRowSums(TOut * &out, const int32_t row_sum_multiplier) {
+    const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block :
+                                                  (vlt == VLType::SME ? sme::get_vector_length<TOut>() / block : 1 ));
+
+    // If we are integrating row sums, we need to do some fix up, depending on whether the multiplier is non-zero or not.
+    if (row_sum_multiplier) {
+        // Non-zero: interleave_block<>() will have done the sums, so 'out' will point to the start of the
+        // next block (post sums).
+        // We need to go back and apply the multiplier to the computed sums.  We don't need to change 'out'.
+        int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
+
+        out_int32 -= height;
+        for (unsigned int i=0; i<height; i++) {
+            out_int32[i] *= row_sum_multiplier;
+        }
+    } else {
+        // Zero: interleave_block<>() will *not* have done the sums, so 'out' will point to the start of the
+        // sum block.  We need to insert the (zero) sums, and advance 'out'.
+        int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
+
+        for (unsigned int i=0; i<height; i++) {
+            out_int32[i] = 0;
+        }
+
+        out_int32 += height;
+
+        out = reinterpret_cast<TOut *>(out_int32);
+    }
+}
+
+template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TIn, typename TOut>
+void IndirectInterleave(TOut *out, const TIn * const * const *ptr, unsigned int stringlen,
+                        unsigned int rounded_stringlen, const unsigned int y0, const unsigned int ymax,
+                        const unsigned int k0, const unsigned int kmax, bool integrate_sums,
+                        const int32_t row_sum_multiplier) {
+    const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block :
+                                                  (vlt == VLType::SME ? sme::get_vector_length<TOut>() / block : 1 ));
+
+    // 'interleave_block' implementations are entitled to read a pointer for each row they handle from the input
+    // pointer array, even for out of range rows (although they must not subsequently dereference those pointers for
+    // out of range rows).  This allows interleave_block to use techniques like row predication, or loading all
+    // pointers and conditionally overriding the out of range ones.
+
+    // This is problematic in the "pure" indirect case when we get to the last rows, where it can lead to out of
+    // range reads.  Avoid this with a local buffer to use in last-rows cases.  Use alloca as a std::vector can be
+    // expensive in highly threaded scenarios.
+    const TIn **row_ptrs = reinterpret_cast<const TIn **>(alloca(height * sizeof(const TIn *)));
+
+    // Figure out the starting position based on k0 (with rounded length)
+    unsigned int start_string      = k0 / rounded_stringlen;
+    unsigned int start_stringpos   = k0 % rounded_stringlen;
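+    // e.g. (illustrative numbers) k0 = 10 with rounded_stringlen = 4 starts in string 2 at
+    // position 2 within that string.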
+
+    // Process the input in blocks of 'height' rows at a time...
+    for (unsigned int ybase = y0; ybase < ymax; ybase+=height) {
+        // Height to process
+        unsigned int active_height = std::min(ymax - ybase, height);
+
+        // Track our progress through the various strings
+        unsigned int k_left    = (kmax - k0);
+        unsigned int string    = start_string;
+        unsigned int stringpos = start_stringpos;
+
+        bool first = true;
+
+        // Prepare to call 'interleave_block' above for each string encompassed by K range
+        while (k_left > 0) {
+            // Width to process - and the width we will generate (with padding)
+            unsigned int in_width   = std::min(k_left, stringlen - stringpos);
+            unsigned int out_width  = std::min(k_left, rounded_stringlen - stringpos);
+
+            const TIn * const *row_base = ptr[string] + ybase;
+
+            // If not all rows are valid, copy the ones that are into local array (see above comment).
+            if (active_height < height) {
+                for (unsigned int i=0; i<active_height; i++) {
+                    row_ptrs[i] = ptr[string][ybase + i];
+                }
+
+                row_base = row_ptrs;
+            }
+
+            // 'integrate_sums' is a function parameter rather than a template parameter to prevent duplicating too
+            // much code.  However, integrated sums make no sense for non-integral types and won't ever be
+            // requested.  So put a type trait check here to avoid generating pointless code.
+            if (std::is_integral<TOut>::value && integrate_sums && row_sum_multiplier) {
+                interleave_block<height_vectors, block, vlt, true>(out, row_base, in_width, active_height, stringpos, first);
+            } else {
+                interleave_block<height_vectors, block, vlt, false>(out, row_base, in_width, active_height, stringpos, first);
+            }
+
+            k_left -= out_width;
+            string++;
+            stringpos=0;
+            first=false;
+        }
+
+        if (std::is_integral<TOut>::value && integrate_sums) {
+            FixupRowSums<height_vectors, block, vlt>(out, row_sum_multiplier);
+        }
+    }
+}
+
+template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TIn, typename TOut>
+void ConvolutionInterleave(TOut *out, const TIn *in, size_t in_stride, const convolver<TIn> &conv, const unsigned int rounded_stringlen,
+        const unsigned int y0, const unsigned int ymax, const unsigned int k0, const unsigned int kmax, bool integrate_sums, const int32_t row_sum_multiplier) {
+    const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block :
+                                                  (vlt == VLType::SME ? sme::get_vector_length<TOut>() / block : 1 ));
+    auto conv_cols = conv.process_columns(in, in_stride, k0, kmax, rounded_stringlen);
+
+    // Use alloca here as a std::vector can be expensive in highly threaded scenarios.
+    const TIn **row_ptrs = reinterpret_cast<const TIn **>(alloca(height * sizeof(const TIn *)));
+
+    for (unsigned int ybase = y0; ybase < ymax; ybase += height) {
+        // How many of the rows are active - the rest will get padded in interleave_block.
+        unsigned int active_height   = std::min(ymax - ybase, height);
+        bool first = true;
+
+        auto conv_rows = conv_cols.process_rows(ybase, active_height);
+
+        while (!conv_rows.finished()) {
+            unsigned int width, offset;
+
+            // Get next set of parameters
+            std::tie(width, offset) = conv_rows.next_block(row_ptrs);
+
+            // Perform the interleave
+            if (std::is_integral<TOut>::value && integrate_sums && row_sum_multiplier) {
+                interleave_block<height_vectors, block, vlt, true>(out, row_ptrs, width, active_height, offset, first);
+            } else {
+                interleave_block<height_vectors, block, vlt, false>(out, row_ptrs, width, active_height, offset, first);
+            }
+
+            first=false;
+        }
+
+        if (std::is_integral<TOut>::value && integrate_sums) {
+            FixupRowSums<height_vectors, block, vlt>(out, row_sum_multiplier);
+        }
+    }
+}
+
+template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TIn, typename TOut>
+void Interleave(TOut *out, const TIn *in, size_t in_stride, const unsigned int y0, const unsigned int ymax, const unsigned int k0, const unsigned int kmax, bool integrate_sums, const int32_t row_sum_multiplier) {
+    const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block :
+                                                  (vlt == VLType::SME ? sme::get_vector_length<TOut>() / block : 1 ));
+    // Use alloca here as a std::vector can be expensive in highly threaded scenarios.
+    const TIn **row_ptrs = reinterpret_cast<const TIn **>(alloca(height * sizeof(const TIn *)));
+
+    const unsigned int width=kmax-k0;
+
+    for (unsigned int y=y0; y<ymax; y+=height) {
+        for (unsigned int r=0; r<height; r++) {
+            row_ptrs[r] = in + ((y + r) * in_stride);
+        }
+
+        if (std::is_integral<TOut>::value && integrate_sums && row_sum_multiplier) {
+            interleave_block<height_vectors, block, vlt, true>(out, row_ptrs, width, std::min(height, ymax-y), k0, true);
+        } else {
+            interleave_block<height_vectors, block, vlt, false>(out, row_ptrs, width, std::min(height, ymax-y), k0, true);
+        }
+
+        if (std::is_integral<TOut>::value && integrate_sums) {
+            FixupRowSums<height_vectors, block, vlt>(out, row_sum_multiplier);
+        }
+    }
+}
diff --git a/src/core/NEON/kernels/arm_gemm/kernel_traits.hpp b/src/core/NEON/kernels/arm_gemm/kernel_traits.hpp
new file mode 100644
index 0000000..24c304a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernel_traits.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <type_traits>
+
+namespace arm_gemm
+{
+
+namespace
+{
+  template <class T>
+  constexpr auto is_sme_impl(int)
+    -> decltype(T::is_sme(), std::true_type{})
+  {
+    return std::true_type{};
+  }
+
+  template <class>
+  constexpr auto is_sme_impl(...) -> std::false_type
+  {
+    return std::false_type{};
+  }
+}
+
+template <class T>
+struct is_sme
+{
+  static constexpr auto value = std::is_same<decltype(is_sme_impl<T>(0)),
+                                             std::true_type>::value;
+};
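+
+// Usage sketch (hypothetical kernel types, shown for illustration only): a kernel class opts in
+// by exposing a callable static is_sme() member, which the detection idiom above picks up.
+//
+//   struct sme_kernel  { static constexpr bool is_sme() { return true; } };
+//   struct neon_kernel { };
+//
+//   static_assert(is_sme<sme_kernel>::value, "");
+//   static_assert(!is_sme<neon_kernel>::value, "");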
+
+}  // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL.hpp
new file mode 100644
index 0000000..f86bceb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL.hpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+#include "../std_transforms_sme.hpp"
+#include "../bfloat.hpp"
+
+#define ARGLIST  \
+    const bfloat16 *, const bfloat16 *, \
+    float *, size_t, size_t, \
+    const float *, Activation, bool
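+
+// The argument order above matches the kernel definition in generic.cpp:
+// (A_ptr, B_ptr, output_ptr, N, K, bias, activation, plus a trailing bool flag).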
+
+namespace arm_gemm
+{
+void sme2_gemv_bf16fp32_dot_16VL( ARGLIST );
+
+class cls_sme2_gemv_bf16fp32_dot_16VL
+{
+public:
+    typedef bfloat16 operand_type;
+    typedef float result_type;
+
+    typedef void (*kern_type)( ARGLIST );
+
+    static unsigned int out_width()
+    {
+        return sme::get_vector_length<float>() * 16;
+    }
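+    // Worked example (illustrative): with a 512-bit streaming vector length,
+    // sme::get_vector_length<float>() is 16, so out_width() is 16 * 16 = 256 output columns.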
+
+    static constexpr unsigned int k_unroll()
+    {
+        return 2;
+    }
+
+    static constexpr bool supports_accumulate()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_bias()
+    {
+        return true;
+    }
+
+    static constexpr bool supports_activation()
+    {
+        return true;
+    }
+
+
+    StdTransformsSME<operand_type, result_type, 1, 16, 2> transforms = {};
+
+
+    // Default to the generic kernel
+    kern_type kernel=sme2_gemv_bf16fp32_dot_16VL;
+    cls_sme2_gemv_bf16fp32_dot_16VL(const CPUInfo *)
+    {
+    }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
new file mode 100644
index 0000000..26861fb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
@@ -0,0 +1,554 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+#include "../../bfloat.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sme2_gemv_bf16fp32_dot_16VL (
+    const bfloat16 *A_ptr, const bfloat16 *B_ptr, float *output_ptr,
+    size_t N, size_t K,
+    const float *bias, Activation act, bool
+)
+{
+    struct KernelArgs {
+        float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+        float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+        const bfloat16 *B_ptr = {};
+        size_t output_offset = {};
+        unsigned int input_initial_col = {};
+    } ka;
+
+    unsigned long flags=0;
+    ka.B_ptr = B_ptr;
+    switch(act.type) {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            ka.maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            ka.minval = 0;
+            flags |= 0x2;
+            break;
+    }
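+    // Note: bit 1 of 'flags' tells the assembly below whether to apply the min/max clamp
+    // (see the 'tbz %x[flags], #1, ...' checks ahead of the output stores).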
+    __asm__ __volatile__(
+      "ptrue p1.b\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x27, ALL, MUL #4\n"
+      "add x26, %x[N], x27\n"
+      "sub x26, x26, #0x1\n"
+      "udiv x26, x26, x27\n"
+      "add x21, x26, #0x3\n"
+      "and x21, x21, #0xfffffffffffffffc\n"
+      "mul x21, x21, x27\n"
+      "mul x21, x21, %x[K]\n"
+      "mov x9, #0x0\n"
+      "mov x25, %x[B_ptr]\n"
+      "mov x24, %x[output_ptr]\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "lsl x21, x21, #0x1\n"
+      "mov x20, #0x1\n"
+      "1:"  // RHS size check loop
+      "cmp x21, #0x200000\n"
+      "blt 2f\n"
+      "tbnz x21, #0, 3f\n"
+      "lsr x21, x21, #0x1\n"
+      "lsl x20, x20, #0x1\n"
+      "b 1b\n"
+      "2:"  // RHS do prefetch
+      "lsl x19, x21, #0x26\n"
+      "sub x20, x20, #0x1\n"
+      "lsl x20, x20, #0x16\n"
+      "orr x21, x21, x19\n"
+      "orr x21, x21, x20\n"
+      ".inst 0xf8b54b3a  // rprfm pldonce, x21, [x25]\n"
+      "3:"  // RHS prefetch exit
+      "mov x23, %x[bias]\n"
+      "4:"  // Column loop
+      "cmp x26, #0x4\n"
+      "bge 28f\n"
+      "cmp x26, #0x2\n"
+      "bgt 20f\n"
+      "beq 12f\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x1\n"
+      "mov x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 5f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      "b 6f\n"
+      "5:"  // Width 1: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "6:"  // Width 1: setup done
+      "cmp x20, #0x8\n"
+      "ble 8f\n"
+      "7:"  // Width 1: Multiply loop: Main loop head
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x8\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "cmp x20, #0x8\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 7b\n"
+      "8:"  // Width 1: Multiply loop: Single iteration only
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "9:"  // Width 1: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 10f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xa060c308  // st1w { z8.s-z11.s }, p8, [x24]\n"
+      "addvl x24, x24, #4\n"
+      "b 11f\n"
+      "10:"  // Width 1: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c308  // st1w { z8.s-z11.s }, p8, [x24]\n"
+      "addvl x24, x24, #4\n"
+      "11:"  // Width 1: Output done
+      "b 36f\n"
+      "12:"  // Width 2
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x1\n"
+      "sub x19, %x[N], x27\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 13f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6e8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042d01  // mova za.d[x9, #1], { z8.d-z11.d }\n"
+      "b 14f\n"
+      "13:"  // Width 2: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "14:"  // Width 2: setup done
+      "cmp x20, #0x8\n"
+      "ble 16f\n"
+      "15:"  // Width 2: Multiply loop: Main loop head
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x8\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      "cmp x20, #0x8\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab099  // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab619  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aba19  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa041a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15abf19  // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 15b\n"
+      "16:"  // Width 2: Multiply loop: Single iteration only
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab099  // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab619  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aba19  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa041a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15abf19  // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "17:"  // Width 2: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 18f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1a6c814  // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
+      ".inst 0xa061c314  // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+      "addvl x24, x24, #8\n"
+      "b 19f\n"
+      "18:"  // Width 2: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c314  // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+      "addvl x24, x24, #8\n"
+      "19:"  // Width 2: Output done
+      "b 36f\n"
+      "20:"  // Width 3
+      "mov x19, #0x2\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x1\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 21f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6e8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042d01  // mova za.d[x9, #1], { z8.d-z11.d }\n"
+      ".inst 0xa042c6e4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042c82  // mova za.d[x9, #2], { z4.d-z7.d }\n"
+      "b 22f\n"
+      "21:"  // Width 3: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "22:"  // Width 3: setup done
+      "cmp x20, #0x8\n"
+      "ble 24f\n"
+      "23:"  // Width 3: Multiply loop: Main loop head
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x8\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      "cmp x20, #0x8\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab099  // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
+      ".inst 0xa042a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab29a  // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab619  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
+      ".inst 0xa042a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab71a  // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aba19  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
+      ".inst 0xa042a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abb9a  // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa041a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15abf19  // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abe1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 23b\n"
+      "24:"  // Width 3: Multiply loop: Single iteration only
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab099  // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
+      ".inst 0xa042a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab29a  // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab619  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
+      ".inst 0xa042a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab71a  // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aba19  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
+      ".inst 0xa042a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abb9a  // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa041a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15abf19  // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abe1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "25:"  // Width 3: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 26f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1a6c814  // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc1a6c810  // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
+      ".inst 0xa062c310  // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+      "addvl x24, x24, #12\n"
+      "b 27f\n"
+      "26:"  // Width 3: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa062c310  // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+      "addvl x24, x24, #12\n"
+      "27:"  // Width 3: Output done
+      "b 36f\n"
+      "28:"  // Width 4
+      "mov x19, #0x3\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x1\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 29f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6e8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042d01  // mova za.d[x9, #1], { z8.d-z11.d }\n"
+      ".inst 0xa042c6e4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042c82  // mova za.d[x9, #2], { z4.d-z7.d }\n"
+      ".inst 0xa043c6f0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+      ".inst 0xc0042e03  // mova za.d[x9, #3], { z16.d-z19.d }\n"
+      "addvl x23, x23, #16\n"
+      "b 30f\n"
+      "29:"  // Width 4: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "30:"  // Width 4: setup done
+      "cmp x20, #0x8\n"
+      "ble 32f\n"
+      "31:"  // Width 4: Multiply loop: Main loop head
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x8\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      "cmp x20, #0x8\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab099  // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
+      ".inst 0xa042a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab29a  // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
+      ".inst 0xa043a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15ab21b  // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab619  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
+      ".inst 0xa042a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab71a  // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa043a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15ab61b  // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aba19  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
+      ".inst 0xa042a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abb9a  // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
+      ".inst 0xa043a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aba9b  // bfdot za.s[x9, 3], { z20.h-z23.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa041a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15abf19  // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abe1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa043a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15abe1b  // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 31b\n"
+      "32:"  // Width 4: Multiply loop: Single iteration only
+      "whilelt p0.h, XZR, x20\n"
+      "ld1rqh { z10.h }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xa040a721  // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15ab018  // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab099  // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
+      ".inst 0xa042a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab29a  // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
+      ".inst 0xa043a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15ab21b  // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab718  // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15ab619  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
+      ".inst 0xa042a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15ab71a  // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
+      ".inst 0xa043a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15ab61b  // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc15ab998  // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
+      ".inst 0xa041a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aba19  // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
+      ".inst 0xa042a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abb9a  // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
+      ".inst 0xa043a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aba9b  // bfdot za.s[x9, 3], { z20.h-z23.h }, z10.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15abe18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa041a739  // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15abf19  // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15abe1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
+      ".inst 0xa043a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15abe1b  // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "33:"  // Width 4: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 34f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1a6c814  // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
+      ".inst 0xc0062c78  // mova { z24.d-z27.d }, za.d[x9, #3]\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc1a6c810  // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
+      ".inst 0xa062c710  // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
+      ".inst 0xc1a6c818  // fclamp { z24.s-z27.s }, z0.s, z6.s\n"
+      ".inst 0xa063c318  // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      "b 35f\n"
+      "34:"  // Width 4: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa062c710  // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
+      ".inst 0xc0062c78  // mova { z24.d-z27.d }, za.d[x9, #3]\n"
+      ".inst 0xa063c318  // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      "35:"  // Width 4: Output done
+      "subs x26, x26, #0x4\n"
+      "sub %x[N], %x[N], x27, LSL #2\n"
+      "bgt 4b\n"
+      "36:"  // Exit
+      ".inst 0xd503467f  // SMSTOP\n"
+      "ptrue p1.b\n"
+      : [N] "+&r" (N)
+      : [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL.hpp
new file mode 100644
index 0000000..f33cb9a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL.hpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+#include "../std_transforms_sme.hpp"
+
+#define ARGLIST  \
+    const float *, const float *, \
+    float *, size_t, size_t, \
+    const float *, Activation, bool
+
+namespace arm_gemm
+{
+void sme2_gemv_fp32_mla_16VL( ARGLIST );
+
+class cls_sme2_gemv_fp32_mla_16VL
+{
+public:
+    typedef float operand_type;
+    typedef float result_type;
+
+    typedef void (*kern_type)( ARGLIST );
+
+    static unsigned int out_width()
+    {
+        return sme::get_vector_length<float>() * 16;
+    }
+
+    static constexpr unsigned int k_unroll()
+    {
+        return 1;
+    }
+
+    static constexpr bool supports_accumulate()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_bias()
+    {
+        return true;
+    }
+
+    static constexpr bool supports_activation()
+    {
+        return true;
+    }
+
+
+    StdTransformsSME<operand_type, result_type, 1, 16, 1> transforms = {};
+
+
+    // Default to the generic kernel
+    kern_type kernel=sme2_gemv_fp32_mla_16VL;
+    cls_sme2_gemv_fp32_mla_16VL(const CPUInfo *)
+    {
+    }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+
+#endif // __aarch64__
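The descriptor class above is what the arm_gemm framework queries when choosing and scheduling this kernel: out_width() reports the natural output block (16 SVE vectors of fp32, matching the four ZA quad-vector groups the assembly fills per column strip) and k_unroll() == 1 means K needs no padding. As a rough, illustrative sketch only (not part of this patch; the helper name is hypothetical), that block size could be used like this:

    #include <cstddef>

    // Hypothetical helper: number of out_width()-sized column blocks in N,
    // e.g. when splitting the output columns across threads. Illustrative only.
    static size_t hypothetical_num_column_blocks(size_t N)
    {
        const size_t block = cls_sme2_gemv_fp32_mla_16VL::out_width(); // VL(fp32) * 16
        return (N + block - 1) / block;                                // ceiling division
    }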
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
new file mode 100644
index 0000000..4c0ae2c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sme2_gemv_fp32_mla_16VL (
+    const float *A_ptr, const float *B_ptr, float *output_ptr,
+    size_t N, size_t K,
+    const float *bias, Activation act, bool
+)
+{
+    struct KernelArgs {
+        float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+        float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+        const float *B_ptr = {};
+        size_t output_offset = {};
+        unsigned int input_initial_col = {};
+    } ka;
+
+    unsigned long flags=0;
+    ka.B_ptr = B_ptr;
+    switch(act.type) {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            ka.maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            ka.minval = 0;
+            flags |= 0x2;
+            break;
+    }
+    __asm__ __volatile__(
+      "ptrue p1.b\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x27, ALL, MUL #4\n"
+      "add x26, %x[N], x27\n"
+      "sub x26, x26, #0x1\n"
+      "udiv x26, x26, x27\n"
+      "add x21, x26, #0x3\n"
+      "and x21, x21, #0xfffffffffffffffc\n"
+      "mul x21, x21, x27\n"
+      "mul x21, x21, %x[K]\n"
+      "mov x9, #0x0\n"
+      "mov x25, %x[B_ptr]\n"
+      "mov x24, %x[output_ptr]\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "lsl x21, x21, #0x2\n"
+      "mov x20, #0x1\n"
+      "1:"  // RHS size check loop
+      "cmp x21, #0x200000\n"
+      "blt 2f\n"
+      "tbnz x21, #0, 3f\n"
+      "lsr x21, x21, #0x1\n"
+      "lsl x20, x20, #0x1\n"
+      "b 1b\n"
+      "2:"  // RHS do prefetch
+      "lsl x19, x21, #0x26\n"
+      "sub x20, x20, #0x1\n"
+      "lsl x20, x20, #0x16\n"
+      "orr x21, x21, x19\n"
+      "orr x21, x21, x20\n"
+      ".inst 0xf8b54b3a  // rprfm pldonce, x21, [x25]\n"
+      "3:"  // RHS prefetch exit
+      "mov x23, %x[bias]\n"
+      "4:"  // Column loop
+      "cmp x26, #0x4\n"
+      "bge 28f\n"
+      "cmp x26, #0x2\n"
+      "bgt 20f\n"
+      "beq 12f\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "mov x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 5f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      "b 6f\n"
+      "5:"  // Width 1: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "6:"  // Width 1: setup done
+      "cmp x20, #0x4\n"
+      "ble 8f\n"
+      "7:"  // Width 1: Multiply loop: Main loop head
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x4\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      "cmp x20, #0x4\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 7b\n"
+      "8:"  // Width 1: Multiply loop: Single iteration only
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "9:"  // Width 1: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 10f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xa060c308  // st1w { z8.s-z11.s }, p8, [x24]\n"
+      "addvl x24, x24, #4\n"
+      "b 11f\n"
+      "10:"  // Width 1: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c308  // st1w { z8.s-z11.s }, p8, [x24]\n"
+      "addvl x24, x24, #4\n"
+      "11:"  // Width 1: Output done
+      "b 36f\n"
+      "12:"  // Width 2
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "sub x19, %x[N], x27\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 13f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6e8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042d01  // mova za.d[x9, #1], { z8.d-z11.d }\n"
+      "b 14f\n"
+      "13:"  // Width 2: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "14:"  // Width 2: setup done
+      "cmp x20, #0x4\n"
+      "ble 16f\n"
+      "15:"  // Width 2: Multiply loop: Main loop head
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x4\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      "cmp x20, #0x4\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa041c725  // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa081  // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa601  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaa01  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa041c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaf01  // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 15b\n"
+      "16:"  // Width 2: Multiply loop: Single iteration only
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      ".inst 0xa041c725  // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa081  // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa601  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaa01  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa041c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaf01  // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "17:"  // Width 2: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 18f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1a6c814  // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
+      ".inst 0xa061c314  // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+      "addvl x24, x24, #8\n"
+      "b 19f\n"
+      "18:"  // Width 2: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c314  // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+      "addvl x24, x24, #8\n"
+      "19:"  // Width 2: Output done
+      "b 36f\n"
+      "20:"  // Width 3
+      "mov x19, #0x2\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 21f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6e8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042d01  // mova za.d[x9, #1], { z8.d-z11.d }\n"
+      ".inst 0xa042c6e4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042c82  // mova za.d[x9, #2], { z4.d-z7.d }\n"
+      "b 22f\n"
+      "21:"  // Width 3: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "22:"  // Width 3: setup done
+      "cmp x20, #0x4\n"
+      "ble 24f\n"
+      "23:"  // Width 3: Multiply loop: Main loop head
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x4\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      "cmp x20, #0x4\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa041c725  // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa081  // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
+      ".inst 0xa042c735  // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa282  // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa601  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
+      ".inst 0xa042c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa702  // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaa01  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
+      ".inst 0xa042c73d  // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aab82  // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa041c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaf01  // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
+      ".inst 0xa042c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aae02  // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 23b\n"
+      "24:"  // Width 3: Multiply loop: Single iteration only
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      ".inst 0xa041c725  // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa081  // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
+      ".inst 0xa042c735  // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa282  // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa601  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
+      ".inst 0xa042c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa702  // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaa01  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
+      ".inst 0xa042c73d  // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aab82  // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa041c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaf01  // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
+      ".inst 0xa042c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aae02  // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "25:"  // Width 3: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 26f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1a6c814  // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc1a6c810  // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
+      ".inst 0xa062c310  // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+      "addvl x24, x24, #12\n"
+      "b 27f\n"
+      "26:"  // Width 3: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa062c310  // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+      "addvl x24, x24, #12\n"
+      "27:"  // Width 3: Output done
+      "b 36f\n"
+      "28:"  // Width 4
+      "mov x19, #0x3\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 29f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6e8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042d01  // mova za.d[x9, #1], { z8.d-z11.d }\n"
+      ".inst 0xa042c6e4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042c82  // mova za.d[x9, #2], { z4.d-z7.d }\n"
+      ".inst 0xa043c6f0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+      ".inst 0xc0042e03  // mova za.d[x9, #3], { z16.d-z19.d }\n"
+      "addvl x23, x23, #16\n"
+      "b 30f\n"
+      "29:"  // Width 4: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "30:"  // Width 4: setup done
+      "cmp x20, #0x4\n"
+      "ble 32f\n"
+      "31:"  // Width 4: Multiply loop: Main loop head
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "sub x20, x20, #0x4\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      "cmp x20, #0x4\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa041c725  // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa081  // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
+      ".inst 0xa042c735  // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa282  // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
+      ".inst 0xa043c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aa203  // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa601  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
+      ".inst 0xa042c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa702  // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa043c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aa603  // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaa01  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
+      ".inst 0xa042c73d  // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aab82  // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
+      ".inst 0xa043c735  // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aaa83  // fmla za.s[x9, 3], { z20.s-z23.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa041c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaf01  // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
+      ".inst 0xa042c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aae02  // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa043c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aae03  // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "bgt 31b\n"
+      "32:"  // Width 4: Multiply loop: Single iteration only
+      "whilelt p0.s, XZR, x20\n"
+      "ld1rqw { z10.s }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xa040c721  // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc15aa000  // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
+      ".inst 0xa041c725  // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa081  // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
+      ".inst 0xa042c735  // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa282  // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
+      ".inst 0xa043c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aa203  // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa700  // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aa601  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
+      ".inst 0xa042c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aa702  // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
+      ".inst 0xa043c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aa603  // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040c72d  // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      ".inst 0xc15aa980  // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
+      ".inst 0xa041c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaa01  // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
+      ".inst 0xa042c73d  // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aab82  // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
+      ".inst 0xa043c735  // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aaa83  // fmla za.s[x9, 3], { z20.s-z23.s }, z10.s[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
+      ".inst 0xc15aae00  // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa041c739  // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc15aaf01  // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
+      ".inst 0xa042c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc15aae02  // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
+      ".inst 0xa043c731  // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc15aae03  // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[3]\n"
+      "addvl x25, x25, #16\n"
+      "33:"  // Width 4: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 34f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z0.s }, p1/Z, [x20]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      "ld1rw { z6.s }, p1/Z, [x19]\n"
+      ".inst 0xc1a6c808  // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1a6c814  // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
+      ".inst 0xc0062c78  // mova { z24.d-z27.d }, za.d[x9, #3]\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc1a6c810  // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
+      ".inst 0xa062c710  // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
+      ".inst 0xc1a6c818  // fclamp { z24.s-z27.s }, z0.s, z6.s\n"
+      ".inst 0xa063c318  // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      "b 35f\n"
+      "34:"  // Width 4: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c34  // mova { z20.d-z23.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c714  // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc0062c50  // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+      ".inst 0xa062c710  // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
+      ".inst 0xc0062c78  // mova { z24.d-z27.d }, za.d[x9, #3]\n"
+      ".inst 0xa063c318  // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      "35:"  // Width 4: Output done
+      "subs x26, x26, #0x4\n"
+      "sub %x[N], %x[N], x27, LSL #2\n"
+      "bgt 4b\n"
+      "36:"  // Exit
+      ".inst 0xd503467f  // SMSTOP\n"
+      "ptrue p1.b\n"
+      : [N] "+&r" (N)
+      : [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
+#endif // __ARM_FEATURE_SVE
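For reference, the activation path in the kernel above works as follows: the C++ prologue sets bit 1 of flags and fills KernelArgs::minval/maxval for ReLU/BoundedReLU, and the assembly epilogue tests that bit ("tbz %x[flags], #1, ...") before clamping each group of result vectors with fclamp. A scalar sketch of the same semantics (illustrative only; the helper name is hypothetical):

    #include <algorithm>

    // Per-element model of the fclamp epilogue: clamp to [minval, maxval]
    // only when bit 1 of flags is set (ReLU / BoundedReLU).
    static inline float hypothetical_apply_activation(float v, unsigned long flags,
                                                      float minval, float maxval)
    {
        if (flags & 0x2)
        {
            v = std::min(std::max(v, minval), maxval); // maxval stays +inf for plain ReLU
        }
        return v;
    }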
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL.hpp
new file mode 100644
index 0000000..f52fbcd
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL.hpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+#include "../std_transforms_sme.hpp"
+#include "../bfloat.hpp"
+
+#define ARGLIST  \
+    const float *, const bfloat16 *, \
+    float *, size_t, size_t, \
+    const float *, Activation, bool
+
+namespace arm_gemm
+{
+void sme2_gemv_fp32bf16fp32_dot_16VL( ARGLIST );
+
+class cls_sme2_gemv_fp32bf16fp32_dot_16VL
+{
+public:
+    typedef bfloat16 operand_type;
+    typedef float result_type;
+
+    typedef void (*kern_type)( ARGLIST );
+
+    static unsigned int out_width()
+    {
+        return sme::get_vector_length<float>() * 16;
+    }
+
+    static constexpr unsigned int k_unroll()
+    {
+        return 2;
+    }
+
+    static constexpr bool supports_accumulate()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_bias()
+    {
+        return true;
+    }
+
+    static constexpr bool supports_activation()
+    {
+        return true;
+    }
+
+
+    StdTransformsSME<operand_type, result_type, 1, 16, 2> transforms = {};
+
+
+    // Default to the generic kernel
+    kern_type kernel=sme2_gemv_fp32bf16fp32_dot_16VL;
+    cls_sme2_gemv_fp32bf16fp32_dot_16VL(const CPUInfo *)
+    {
+    }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+
+#endif // __aarch64__
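Unlike the fp32 MLA variant, this kernel takes fp32 LHS data and bfloat16 RHS data: the generic.cpp that follows converts the LHS to bfloat16 on the fly (bfcvt/uzp1/trn1) and accumulates with BFDOT into fp32 ZA slices, which is why k_unroll() above is 2. A rough scalar reference model (illustrative only, assuming K is a multiple of 2 and that the bfloat16 helper type from bfloat.hpp converts to and from float; the function name is hypothetical):

    // Each BFDOT lane sums two bf16 products into an fp32 accumulator.
    static float hypothetical_bf16_dot(const float *a, const bfloat16 *b, size_t K)
    {
        float acc = 0.0f;
        for (size_t k = 0; k < K; k += 2)
        {
            acc += static_cast<float>(bfloat16(a[k]))     * static_cast<float>(b[k]);
            acc += static_cast<float>(bfloat16(a[k + 1])) * static_cast<float>(b[k + 1]);
        }
        return acc;
    }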
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
new file mode 100644
index 0000000..8b8bcb6
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
@@ -0,0 +1,611 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+#include "../../bfloat.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sme2_gemv_fp32bf16fp32_dot_16VL (
+    const float *A_ptr, const bfloat16 *B_ptr, float *output_ptr,
+    size_t N, size_t K,
+    const float *bias, Activation act, bool
+)
+{
+    struct KernelArgs {
+        float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+        float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+        const bfloat16 *B_ptr = {};
+        size_t output_offset = {};
+        unsigned int input_initial_col = {};
+    } ka;
+
+    unsigned long flags=0;
+    ka.B_ptr = B_ptr;
+    switch(act.type) {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            ka.maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            ka.minval = 0;
+            flags |= 0x2;
+            break;
+    }
+    __asm__ __volatile__(
+      "ptrue p2.b\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x28, ALL, MUL #4\n"
+      "add x27, %x[N], x28\n"
+      "sub x27, x27, #0x1\n"
+      "udiv x27, x27, x28\n"
+      "add x21, x27, #0x3\n"
+      "and x21, x21, #0xfffffffffffffffc\n"
+      "mul x21, x21, x28\n"
+      "mul x21, x21, %x[K]\n"
+      "mov x9, #0x0\n"
+      "mov x26, #0x4\n"
+      "mov x25, %x[B_ptr]\n"
+      "mov x24, %x[output_ptr]\n"
+      "ptrue p2.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "lsl x21, x21, #0x1\n"
+      "mov x20, #0x1\n"
+      "1:"  // RHS size check loop
+      "cmp x21, #0x200000\n"
+      "blt 2f\n"
+      "tbnz x21, #0, 3f\n"
+      "lsr x21, x21, #0x1\n"
+      "lsl x20, x20, #0x1\n"
+      "b 1b\n"
+      "2:"  // RHS do prefetch
+      "lsl x19, x21, #0x26\n"
+      "sub x20, x20, #0x1\n"
+      "lsl x20, x20, #0x16\n"
+      "orr x21, x21, x19\n"
+      "orr x21, x21, x20\n"
+      ".inst 0xf8b54b3a  // rprfm pldonce, x21, [x25]\n"
+      "3:"  // RHS prefetch exit
+      "mov x23, %x[bias]\n"
+      "4:"  // Column loop
+      "cmp x27, #0x4\n"
+      "bge 28f\n"
+      "cmp x27, #0x2\n"
+      "bgt 20f\n"
+      "beq 12f\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "mov x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 5f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      "b 6f\n"
+      "5:"  // Width 1: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "6:"  // Width 1: setup done
+      "cmp x20, #0x8\n"
+      "ble 8f\n"
+      "7:"  // Width 1: Multiply loop: Main loop head
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "sub x20, x20, #0x8\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "addvl x25, x25, #16\n"
+      "cmp x20, #0x8\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "addvl x25, x25, #16\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      "bgt 7b\n"
+      "8:"  // Width 1: Multiply loop: Single iteration only
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "subs x20, x20, #0x2\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 9f\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "9:"  // Width 1: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 10f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z29.s }, p2/Z, [x20]\n"
+      "ld1rw { z18.s }, p2/Z, [x19]\n"
+      ".inst 0xc1b2cba8  // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
+      ".inst 0xa060c308  // st1w { z8.s-z11.s }, p8, [x24]\n"
+      "addvl x24, x24, #4\n"
+      "b 11f\n"
+      "10:"  // Width 1: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c308  // st1w { z8.s-z11.s }, p8, [x24]\n"
+      "addvl x24, x24, #4\n"
+      "11:"  // Width 1: Output done
+      "b 36f\n"
+      "12:"  // Width 2
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "sub x19, %x[N], x28\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 13f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6f0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e01  // mova za.d[x9, #1], { z16.d-z19.d }\n"
+      "b 14f\n"
+      "13:"  // Width 2: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "14:"  // Width 2: setup done
+      "cmp x20, #0x8\n"
+      "ble 16f\n"
+      "15:"  // Width 2: Multiply loop: Main loop head
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "sub x20, x20, #0x8\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "cmp x20, #0x8\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xc150b119  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b499  // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150ba99  // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150bd19  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
+      "bgt 15b\n"
+      "16:"  // Width 2: Multiply loop: Single iteration only
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "subs x20, x20, #0x2\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b119  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
+      "ble 17f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b499  // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa041a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150ba99  // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 17f\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150bd19  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "17:"  // Width 2: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 18f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z29.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      "ld1rw { z18.s }, p2/Z, [x19]\n"
+      ".inst 0xc1b2cba8  // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1b2cbac  // fclamp { z12.s-z15.s }, z29.s, z18.s\n"
+      ".inst 0xa061c30c  // st1w { z12.s-z15.s }, p8, [x24, #0x4, MUL VL]\n"
+      "addvl x24, x24, #8\n"
+      "b 19f\n"
+      "18:"  // Width 2: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c30c  // st1w { z12.s-z15.s }, p8, [x24, #0x4, MUL VL]\n"
+      "addvl x24, x24, #8\n"
+      "19:"  // Width 2: Output done
+      "b 36f\n"
+      "20:"  // Width 3
+      "mov x19, #0x2\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "msub x19, x28, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 21f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6f0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e01  // mova za.d[x9, #1], { z16.d-z19.d }\n"
+      ".inst 0xa042c6fc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042f82  // mova za.d[x9, #2], { z28.d-z31.d }\n"
+      "b 22f\n"
+      "21:"  // Width 3: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "22:"  // Width 3: setup done
+      "cmp x20, #0x8\n"
+      "ble 24f\n"
+      "23:"  // Width 3: Multiply loop: Main loop head
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "sub x20, x20, #0x8\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "cmp x20, #0x8\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b119  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b21a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      ".inst 0xa042a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b499  // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b59a  // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150ba99  // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150ba1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150bd19  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150be1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
+      "bgt 23b\n"
+      "24:"  // Width 3: Multiply loop: Single iteration only
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "subs x20, x20, #0x2\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b119  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b21a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
+      "ble 25f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b499  // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
+      ".inst 0xa042a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b59a  // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa041a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150ba99  // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150ba1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 25f\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150bd19  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150be1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "25:"  // Width 3: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 26f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z29.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      "ld1rw { z18.s }, p2/Z, [x19]\n"
+      ".inst 0xc1b2cba8  // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
+      ".inst 0xc0062c44  // mova { z4.d-z7.d }, za.d[x9, #2]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1b2cbac  // fclamp { z12.s-z15.s }, z29.s, z18.s\n"
+      ".inst 0xa061c70c  // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc1b2cba4  // fclamp { z4.s-z7.s }, z29.s, z18.s\n"
+      ".inst 0xa062c304  // st1w { z4.s-z7.s }, p8, [x24, #0x8, MUL VL]\n"
+      "addvl x24, x24, #12\n"
+      "b 27f\n"
+      "26:"  // Width 3: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c70c  // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc0062c44  // mova { z4.d-z7.d }, za.d[x9, #2]\n"
+      ".inst 0xa062c304  // st1w { z4.s-z7.s }, p8, [x24, #0x8, MUL VL]\n"
+      "addvl x24, x24, #12\n"
+      "27:"  // Width 3: Output done
+      "b 36f\n"
+      "28:"  // Width 4
+      "mov x19, #0x3\n"
+      "mov x22, %x[A_ptr]\n"
+      "lsl x21, %x[K], #0x2\n"
+      "msub x19, x28, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      ".inst 0x25b367f0  // whilelt p8.s, XZR, x19, VLx4\n"
+      "cbz x23, 29f\n"
+      ".inst 0xa040c6e0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
+      ".inst 0xc0042c00  // mova za.d[x9, #0], { z0.d-z3.d }\n"
+      ".inst 0xa041c6f0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e01  // mova za.d[x9, #1], { z16.d-z19.d }\n"
+      ".inst 0xa042c6fc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042f82  // mova za.d[x9, #2], { z28.d-z31.d }\n"
+      ".inst 0xa043c6f0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+      ".inst 0xc0042e03  // mova za.d[x9, #3], { z16.d-z19.d }\n"
+      "addvl x23, x23, #16\n"
+      "b 30f\n"
+      "29:"  // Width 4: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "30:"  // Width 4: setup done
+      "cmp x20, #0x8\n"
+      "ble 32f\n"
+      "31:"  // Width 4: Multiply loop: Main loop head
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "sub x20, x20, #0x8\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "cmp x20, #0x8\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b119  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
+      ".inst 0xa043a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150b21a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b39b  // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[0]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      ".inst 0xa042a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b499  // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
+      ".inst 0xa043a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150b59a  // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b79b  // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[1]\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150ba99  // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
+      ".inst 0xa043a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150ba1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b99b  // bfdot za.s[x9, 3], { z12.h-z15.h }, z0.h[2]\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150bd19  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
+      ".inst 0xa043a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150be1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150bf9b  // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[3]\n"
+      "bgt 31b\n"
+      "32:"  // Width 4: Multiply loop: Single iteration only
+      "whilelt p1.s, XZR, x20\n"
+      "whilelt p0.s, x26, x20\n"
+      "ld1rqw { z0.s }, p1/Z, [x22]\n"
+      ".inst 0x658aa800  // bfcvt z0.h, p2/M, z0.s\n"
+      "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
+      ".inst 0x658aa96b  // bfcvt z11.h, p2/M, z11.s\n"
+      "uzp1 z0.h, z0.h, z0.h\n"
+      "subs x20, x20, #0x2\n"
+      "uzp1 z11.h, z11.h, z11.h\n"
+      "trn1 z0.d, z0.d, z11.d\n"
+      ".inst 0xa040a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+      "add x22, x22, #0x20\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b098  // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b119  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
+      ".inst 0xa043a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150b21a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xc150b39b  // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[0]\n"
+      "ble 33f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150b618  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
+      ".inst 0xa041a725  // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150b499  // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
+      ".inst 0xa042a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150b59a  // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
+      ".inst 0xa043a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150b79b  // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+      "subs x20, x20, #0x2\n"
+      ".inst 0xc150ba18  // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa041a735  // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150ba99  // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150ba1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
+      ".inst 0xa043a72d  // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150b99b  // bfdot za.s[x9, 3], { z12.h-z15.h }, z0.h[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 33f\n"
+      ".inst 0xa040a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+      ".inst 0xc150bf98  // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
+      ".inst 0xa041a729  // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc150bd19  // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
+      ".inst 0xa042a731  // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc150be1a  // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
+      ".inst 0xa043a73d  // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc150bf9b  // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[3]\n"
+      "addvl x25, x25, #16\n"
+      "33:"  // Width 4: Multiply loop: multiply skip
+      "tbz %x[flags], #1, 34f\n"
+      "add x20, %x[args_ptr], %[offset_min]\n"
+      "add x19, %x[args_ptr], %[offset_max]\n"
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      "ld1rw { z29.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      "ld1rw { z18.s }, p2/Z, [x19]\n"
+      ".inst 0xc1b2cba8  // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
+      ".inst 0xc0062c44  // mova { z4.d-z7.d }, za.d[x9, #2]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc1b2cbac  // fclamp { z12.s-z15.s }, z29.s, z18.s\n"
+      ".inst 0xc0062c60  // mova { z0.d-z3.d }, za.d[x9, #3]\n"
+      ".inst 0xa061c70c  // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc1b2cba4  // fclamp { z4.s-z7.s }, z29.s, z18.s\n"
+      ".inst 0xa062c704  // st1w { z4.s-z7.s }, pn9.b, [x24, #0x8, MUL VL]\n"
+      ".inst 0xc1b2cba0  // fclamp { z0.s-z3.s }, z29.s, z18.s\n"
+      ".inst 0xa063c300  // st1w { z0.s-z3.s }, p8, [x24, #0xc, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      "b 35f\n"
+      "34:"  // Width 4: No activation
+      ".inst 0xc0062c08  // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+      ".inst 0xa060c708  // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xa061c70c  // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
+      ".inst 0xc0062c44  // mova { z4.d-z7.d }, za.d[x9, #2]\n"
+      ".inst 0xa062c704  // st1w { z4.s-z7.s }, pn9.b, [x24, #0x8, MUL VL]\n"
+      ".inst 0xc0062c60  // mova { z0.d-z3.d }, za.d[x9, #3]\n"
+      ".inst 0xa063c300  // st1w { z0.s-z3.s }, p8, [x24, #0xc, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      "35:"  // Width 4: Output done
+      "subs x27, x27, #0x4\n"
+      "sub %x[N], %x[N], x28, LSL #2\n"
+      "bgt 4b\n"
+      "36:"  // Exit
+      ".inst 0xd503467f  // SMSTOP\n"
+      "ptrue p2.b\n"
+      : [N] "+&r" (N)
+      : [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL.hpp
new file mode 100644
index 0000000..4c9f9cf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL.hpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+#include "../std_transforms_sme.hpp"
+
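+// Kernel argument order (matching the definition in generic.cpp): A pointer, B pointer,
+// output pointer, N, K, Requantize32 parameters, column bias, column base.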
+#define ARGLIST  \
+    const int8_t *, const int8_t *, \
+    int8_t *, size_t, size_t, \
+    const Requantize32 *, const int32_t *, unsigned int
+
+namespace arm_gemm
+{
+void sme2_gemv_s8qa_dot_16VL( ARGLIST );
+
+class cls_sme2_gemv_s8qa_dot_16VL
+{
+public:
+    typedef int8_t operand_type;
+    typedef int8_t result_type;
+
+    typedef void (*kern_type)( ARGLIST );
+
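+    // Number of output columns handled per kernel call: 16 SVE vectors of 32-bit
+    // results, so this scales with the runtime vector length.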
+    static unsigned int out_width()
+    {
+        return sme::get_vector_length<int32_t>() * 16;
+    }
+
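+    // K is consumed in blocks of 4: the SDOT instructions used by this kernel
+    // accumulate four int8 products into each 32-bit lane.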
+    static constexpr unsigned int k_unroll()
+    {
+        return 4;
+    }
+
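+    // This variant does not accumulate into an existing result, take a separate bias
+    // input, or fuse an activation; output handling is driven by the Requantize32
+    // parameters instead.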
+    static constexpr bool supports_accumulate()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_bias()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_activation()
+    {
+        return false;
+    }
+
+
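+    // Standard SME transforms; the <1, 16, 4> parameters are assumed to mirror the
+    // single-row strip, 16-vector output width and k_unroll of 4 used by this kernel.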
+    StdTransformsSME<operand_type, result_type, 1, 16, 4> transforms = {};
+
+
+    // Default to the generic kernel
+    kern_type kernel=sme2_gemv_s8qa_dot_16VL;
+    cls_sme2_gemv_s8qa_dot_16VL(const CPUInfo *)
+    {
+    }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
new file mode 100644
index 0000000..348c709
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
@@ -0,0 +1,678 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void sme2_gemv_s8qa_dot_16VL (
+    const int8_t *A_ptr, const int8_t *B_ptr, int8_t *output_ptr,
+    size_t N, size_t K,
+    const Requantize32 *qp, const int32_t *col_bias, unsigned int col_base
+)
+{
+    ARM_COMPUTE_UNUSED(col_base);
+
+    struct KernelArgs {
+        const int8_t *B_ptr = {};
+        size_t output_offset = {};
+        unsigned int input_initial_col = {};
+    } ka;
+
+    unsigned long flags=0;
+    ka.B_ptr = B_ptr;
+    if (qp->c_offset > qp->minval) {
+        flags |= 0x20;
+    }
+    __asm__ __volatile__(
+      "ptrue p2.b\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x27, ALL, MUL #4\n"
+      "add x26, %x[N], x27\n"
+      "sub x26, x26, #0x1\n"
+      "udiv x26, x26, x27\n"
+      "add x21, x26, #0x3\n"
+      "and x21, x21, #0xfffffffffffffffc\n"
+      "mul x21, x21, x27\n"
+      "mov x9, #0x0\n"
+      "mov x25, %x[B_ptr]\n"
+      "mov x24, %x[output_ptr]\n"
+      "ptrue p2.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "mul x21, x21, %x[K]\n"
+      "mov x20, #0x1\n"
+      "1:"  // RHS size check loop
+      "cmp x21, #0x200000\n"
+      "blt 2f\n"
+      "tbnz x21, #0, 3f\n"
+      "lsr x21, x21, #0x1\n"
+      "lsl x20, x20, #0x1\n"
+      "b 1b\n"
+      "2:"  // RHS do prefetch
+      "lsl x19, x21, #0x26\n"
+      "sub x20, x20, #0x1\n"
+      "lsl x20, x20, #0x16\n"
+      "orr x21, x21, x19\n"
+      "orr x21, x21, x20\n"
+      ".inst 0xf8b54b3a  // rprfm pldonce, x21, [x25]\n"
+      "3:"  // RHS prefetch exit
+      "mov x23, %x[col_bias]\n"
+      "mov z26.s, #0x0\n"
+      "mov z24.b, #0x1\n"
+      "bic %x[flags], %x[flags], #0x80000000\n"
+      "4:"  // Column loop
+      "cmp x26, #0x4\n"
+      "bge 34f\n"
+      "cmp x26, #0x2\n"
+      "bgt 24f\n"
+      "beq 14f\n"
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "mov x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 5f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      "b 6f\n"
+      "5:"  // Width 1: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "6:"  // Width 1: setup done
+      "cmp x20, #0x10\n"
+      "ble 9f\n"
+      "7:"  // Width 1: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 8f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "8:"  // Width 1: Multiply loop: unique 1: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 7b\n"
+      "9:"  // Width 1: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 10f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 10f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 10f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "10:"  // Width 1: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 11f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "11:"  // Width 1: Multiply loop: unique 2: skip row sum
+      "tbnz %x[flags], #31, 12f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "saddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "12:"  // Width 1: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p1, [x24]\n"
+      "addvl x24, x24, #1\n"
+      "13:"  // Width 1: Output done
+      "b 44f\n"
+      "14:"  // Width 2
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "sub x19, %x[N], x27\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 15f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      ".inst 0xa041c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e81  // mova za.d[x9, #1], { z20.d-z23.d }\n"
+      "b 16f\n"
+      "15:"  // Width 2: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "16:"  // Width 2: setup done
+      "cmp x20, #0x10\n"
+      "ble 19f\n"
+      "17:"  // Width 2: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b521  // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153bea1  // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 18f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "18:"  // Width 2: Multiply loop: unique 3: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 17b\n"
+      "19:"  // Width 2: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 20f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b521  // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 20f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 20f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153bea1  // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "20:"  // Width 2: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 21f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "21:"  // Width 2: Multiply loop: unique 4: skip row sum
+      "tbnz %x[flags], #31, 22f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "saddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "22:"  // Width 2: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xc1a5ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a4aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0ceac  // sclamp { z12.s-z15.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z12.h, z12.h, z13.h\n"
+      "uzp1 z13.h, z14.h, z15.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p2, [x24]\n"
+      "uzp1 z12.b, z12.b, z13.b\n"
+      "st1b { z12.b }, p1, [x24, #1, MUL VL]\n"
+      "addvl x24, x24, #2\n"
+      "23:"  // Width 2: Output done
+      "b 44f\n"
+      "24:"  // Width 3
+      "mov x19, #0x2\n"
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 25f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      ".inst 0xa041c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e81  // mova za.d[x9, #1], { z20.d-z23.d }\n"
+      ".inst 0xa042c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042e82  // mova za.d[x9, #2], { z20.d-z23.d }\n"
+      "b 26f\n"
+      "25:"  // Width 3: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "26:"  // Width 3: setup done
+      "cmp x20, #0x10\n"
+      "ble 29f\n"
+      "27:"  // Width 3: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b521  // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b622  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153bea1  // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be22  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 28f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "28:"  // Width 3: Multiply loop: unique 5: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 27b\n"
+      "29:"  // Width 3: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 30f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b521  // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b622  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 30f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 30f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153bea1  // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be22  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "30:"  // Width 3: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 31f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "31:"  // Width 3: Multiply loop: unique 6: skip row sum
+      "tbnz %x[flags], #31, 32f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "saddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "32:"  // Width 3: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xc1a5ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
+      ".inst 0xc0062c40  // mova { z0.d-z3.d }, za.d[x9, #2]\n"
+      ".inst 0xc1a5ac00  // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a4aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
+      ".inst 0xc1a4aa20  // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a6ab00  // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0ceac  // sclamp { z12.s-z15.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      ".inst 0xc1b0cea0  // sclamp { z0.s-z3.s }, z21.s, z16.s\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z12.h, z12.h, z13.h\n"
+      "uzp1 z13.h, z14.h, z15.h\n"
+      "uzp1 z0.h, z0.h, z1.h\n"
+      "uzp1 z1.h, z2.h, z3.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p2, [x24]\n"
+      "uzp1 z12.b, z12.b, z13.b\n"
+      "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
+      "uzp1 z0.b, z0.b, z1.b\n"
+      "st1b { z0.b }, p1, [x24, #2, MUL VL]\n"
+      "addvl x24, x24, #3\n"
+      "33:"  // Width 3: Output done
+      "b 44f\n"
+      "34:"  // Width 4
+      "mov x19, #0x3\n"
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 35f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      ".inst 0xa041c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e81  // mova za.d[x9, #1], { z20.d-z23.d }\n"
+      ".inst 0xa042c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042e82  // mova za.d[x9, #2], { z20.d-z23.d }\n"
+      ".inst 0xa043c2f0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x23, #0xc, MUL VL]\n"
+      ".inst 0xc0042e03  // mova za.d[x9, #3], { z16.d-z19.d }\n"
+      "addvl x23, x23, #16\n"
+      "b 36f\n"
+      "35:"  // Width 4: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "36:"  // Width 4: setup done
+      "cmp x20, #0x10\n"
+      "ble 39f\n"
+      "37:"  // Width 4: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b1a3  // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b521  // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b622  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b5a3  // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      ".inst 0xa043833d  // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153bba3  // sdot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153bea1  // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be22  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      ".inst 0xa0438331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153be23  // sdot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 38f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "38:"  // Width 4: Multiply loop: unique 7: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 37b\n"
+      "39:"  // Width 4: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b220  // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b1a3  // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 40f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6a0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b521  // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b622  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b5a3  // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 40f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153baa0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8a1  // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9a2  // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      ".inst 0xa043833d  // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153bba3  // sdot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 40f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bea0  // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153bea1  // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be22  // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      ".inst 0xa0438331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153be23  // sdot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "40:"  // Width 4: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 41f\n"
+      "sdot z26.s, z3.b, z24.b\n"
+      "41:"  // Width 4: Multiply loop: unique 8: skip row sum
+      "tbnz %x[flags], #31, 42f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "saddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "42:"  // Width 4: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xc1a5ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
+      ".inst 0xc0062c40  // mova { z0.d-z3.d }, za.d[x9, #2]\n"
+      ".inst 0xc1a5ac00  // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+      ".inst 0xc0062c68  // mova { z8.d-z11.d }, za.d[x9, #3]\n"
+      ".inst 0xc1a5ac08  // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a4aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
+      ".inst 0xc1a4aa20  // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
+      ".inst 0xc1a4aa28  // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a6ab00  // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+      ".inst 0xc1a6ab08  // add { z8.s-z11.s }, { z8.s-z11.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0ceac  // sclamp { z12.s-z15.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      ".inst 0xc1b0cea0  // sclamp { z0.s-z3.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0cea8  // sclamp { z8.s-z11.s }, z21.s, z16.s\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z12.h, z12.h, z13.h\n"
+      "uzp1 z13.h, z14.h, z15.h\n"
+      "uzp1 z0.h, z0.h, z1.h\n"
+      "uzp1 z1.h, z2.h, z3.h\n"
+      "uzp1 z8.h, z8.h, z9.h\n"
+      "uzp1 z9.h, z10.h, z11.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p2, [x24]\n"
+      "uzp1 z12.b, z12.b, z13.b\n"
+      "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
+      "uzp1 z0.b, z0.b, z1.b\n"
+      "uzp1 z8.b, z8.b, z9.b\n"
+      "st1b { z0.b }, p2, [x24, #2, MUL VL]\n"
+      "st1b { z8.b }, p1, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      "43:"  // Width 4: Output done
+      "subs x26, x26, #0x4\n"
+      "sub %x[N], %x[N], x27, LSL #2\n"
+      "bgt 4b\n"
+      "44:"  // Exit
+      ".inst 0xd503467f  // SMSTOP\n"
+      "ptrue p2.b\n"
+      : [N] "+&r" (N), [flags] "+&r" (flags)
+      : [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [output_ptr] "r" (output_ptr), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL.hpp
new file mode 100644
index 0000000..e15b954
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL.hpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+#include "../std_transforms_sme.hpp"
+
+#define ARGLIST  \
+    const uint8_t *, const uint8_t *, \
+    uint8_t *, size_t, size_t, \
+    const Requantize32 *, const int32_t *, unsigned int
+
+namespace arm_gemm
+{
+void sme2_gemv_u8qa_dot_16VL( ARGLIST );
+
+class cls_sme2_gemv_u8qa_dot_16VL
+{
+public:
+    typedef uint8_t operand_type;
+    typedef uint8_t result_type;
+
+    typedef void (*kern_type)( ARGLIST );
+
+    static unsigned int out_width()
+    {
+        return sme::get_vector_length<uint32_t>() * 16;
+    }
+
+    static constexpr unsigned int k_unroll()
+    {
+        return 4;
+    }
+
+    static constexpr bool supports_accumulate()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_bias()
+    {
+        return false;
+    }
+
+    static constexpr bool supports_activation()
+    {
+        return false;
+    }
+
+
+    StdTransformsSME<operand_type, result_type, 1, 16, 4> transforms = {};
+
+
+    // Default to the generic kernel
+    kern_type kernel=sme2_gemv_u8qa_dot_16VL;
+    cls_sme2_gemv_u8qa_dot_16VL(const CPUInfo *)
+    {
+    }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
new file mode 100644
index 0000000..9822f63
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
@@ -0,0 +1,678 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void sme2_gemv_u8qa_dot_16VL (
+    const uint8_t *A_ptr, const uint8_t *B_ptr, uint8_t *output_ptr,
+    size_t N, size_t K,
+    const Requantize32 *qp, const int32_t *col_bias, unsigned int col_base
+)
+{
+    ARM_COMPUTE_UNUSED(col_base);
+
+    struct KernelArgs {
+        const uint8_t *B_ptr = {};
+        size_t output_offset = {};
+        unsigned int input_initial_col = {};
+    } ka;
+
+    unsigned long flags=0;
+    ka.B_ptr = B_ptr;
+    if (qp->c_offset > qp->minval) {
+        flags |= 0x20;
+    }
+    __asm__ __volatile__(
+      "ptrue p2.b\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cntw x27, ALL, MUL #4\n"
+      "add x26, %x[N], x27\n"
+      "sub x26, x26, #0x1\n"
+      "udiv x26, x26, x27\n"
+      "add x21, x26, #0x3\n"
+      "and x21, x21, #0xfffffffffffffffc\n"
+      "mul x21, x21, x27\n"
+      "mov x9, #0x0\n"
+      "mov x25, %x[B_ptr]\n"
+      "mov x24, %x[output_ptr]\n"
+      "ptrue p2.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "mul x21, x21, %x[K]\n"
+      "mov x20, #0x1\n"
+      "1:"  // RHS size check loop
+      "cmp x21, #0x200000\n"
+      "blt 2f\n"
+      "tbnz x21, #0, 3f\n"
+      "lsr x21, x21, #0x1\n"
+      "lsl x20, x20, #0x1\n"
+      "b 1b\n"
+      "2:"  // RHS do prefetch
+      "lsl x19, x21, #0x26\n"
+      "sub x20, x20, #0x1\n"
+      "lsl x20, x20, #0x16\n"
+      "orr x21, x21, x19\n"
+      "orr x21, x21, x20\n"
+      ".inst 0xf8b54b3a  // rprfm pldonce, x21, [x25]\n"
+      "3:"  // RHS prefetch exit
+      "mov x23, %x[col_bias]\n"
+      "mov z26.s, #0x0\n"
+      "mov z24.b, #0x1\n"
+      "bic %x[flags], %x[flags], #0x80000000\n"
+      "4:"  // Column loop
+      "cmp x26, #0x4\n"
+      "bge 34f\n"
+      "cmp x26, #0x2\n"
+      "bgt 24f\n"
+      "beq 14f\n"
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "mov x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 5f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      "b 6f\n"
+      "5:"  // Width 1: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "6:"  // Width 1: setup done
+      "cmp x20, #0x10\n"
+      "ble 9f\n"
+      "7:"  // Width 1: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 8f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "8:"  // Width 1: Multiply loop: unique 1: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 7b\n"
+      "9:"  // Width 1: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 10f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 10f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 10f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "10:"  // Width 1: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 11f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "11:"  // Width 1: Multiply loop: unique 2: skip row sum
+      "tbnz %x[flags], #31, 12f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "uaddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "12:"  // Width 1: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p1, [x24]\n"
+      "addvl x24, x24, #1\n"
+      "13:"  // Width 1: Output done
+      "b 44f\n"
+      "14:"  // Width 2
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "sub x19, %x[N], x27\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 15f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      ".inst 0xa041c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e81  // mova za.d[x9, #1], { z20.d-z23.d }\n"
+      "b 16f\n"
+      "15:"  // Width 2: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "16:"  // Width 2: setup done
+      "cmp x20, #0x10\n"
+      "ble 19f\n"
+      "17:"  // Width 2: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b531  // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153beb1  // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 18f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "18:"  // Width 2: Multiply loop: unique 3: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 17b\n"
+      "19:"  // Width 2: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 20f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b531  // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 20f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 20f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153beb1  // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "20:"  // Width 2: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 21f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "21:"  // Width 2: Multiply loop: unique 4: skip row sum
+      "tbnz %x[flags], #31, 22f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "uaddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "22:"  // Width 2: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xc1a5ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a4aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0ceac  // sclamp { z12.s-z15.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z12.h, z12.h, z13.h\n"
+      "uzp1 z13.h, z14.h, z15.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p2, [x24]\n"
+      "uzp1 z12.b, z12.b, z13.b\n"
+      "st1b { z12.b }, p1, [x24, #1, MUL VL]\n"
+      "addvl x24, x24, #2\n"
+      "23:"  // Width 2: Output done
+      "b 44f\n"
+      "24:"  // Width 3
+      "mov x19, #0x2\n"
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 25f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      ".inst 0xa041c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e81  // mova za.d[x9, #1], { z20.d-z23.d }\n"
+      ".inst 0xa042c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042e82  // mova za.d[x9, #2], { z20.d-z23.d }\n"
+      "b 26f\n"
+      "25:"  // Width 3: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "26:"  // Width 3: setup done
+      "cmp x20, #0x10\n"
+      "ble 29f\n"
+      "27:"  // Width 3: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b531  // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b632  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153beb1  // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be32  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 28f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "28:"  // Width 3: Multiply loop: unique 5: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 27b\n"
+      "29:"  // Width 3: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 30f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b531  // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b632  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 30f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 30f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153beb1  // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be32  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "30:"  // Width 3: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 31f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "31:"  // Width 3: Multiply loop: unique 6: skip row sum
+      "tbnz %x[flags], #31, 32f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "uaddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "32:"  // Width 3: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xc1a5ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
+      ".inst 0xc0062c40  // mova { z0.d-z3.d }, za.d[x9, #2]\n"
+      ".inst 0xc1a5ac00  // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a4aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
+      ".inst 0xc1a4aa20  // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a6ab00  // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0ceac  // sclamp { z12.s-z15.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      ".inst 0xc1b0cea0  // sclamp { z0.s-z3.s }, z21.s, z16.s\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z12.h, z12.h, z13.h\n"
+      "uzp1 z13.h, z14.h, z15.h\n"
+      "uzp1 z0.h, z0.h, z1.h\n"
+      "uzp1 z1.h, z2.h, z3.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p2, [x24]\n"
+      "uzp1 z12.b, z12.b, z13.b\n"
+      "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
+      "uzp1 z0.b, z0.b, z1.b\n"
+      "st1b { z0.b }, p1, [x24, #2, MUL VL]\n"
+      "addvl x24, x24, #3\n"
+      "33:"  // Width 3: Output done
+      "b 44f\n"
+      "34:"  // Width 4
+      "mov x19, #0x3\n"
+      "mov x22, %x[A_ptr]\n"
+      "mov x21, %x[K]\n"
+      "msub x19, x27, x19, %x[N]\n"
+      "mov x20, %x[K]\n"
+      ".inst 0xf8b54ad8  // rprfm pldmany, x21, [x22]\n"
+      "whilelt p1.b, XZR, x19\n"
+      "cbz x23, 35f\n"
+      ".inst 0xa040c2e4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
+      ".inst 0xc0042c80  // mova za.d[x9, #0], { z4.d-z7.d }\n"
+      ".inst 0xa041c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+      ".inst 0xc0042e81  // mova za.d[x9, #1], { z20.d-z23.d }\n"
+      ".inst 0xa042c2f4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
+      ".inst 0xc0042e82  // mova za.d[x9, #2], { z20.d-z23.d }\n"
+      ".inst 0xa043c2f0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x23, #0xc, MUL VL]\n"
+      ".inst 0xc0042e03  // mova za.d[x9, #3], { z16.d-z19.d }\n"
+      "addvl x23, x23, #16\n"
+      "b 36f\n"
+      "35:"  // Width 4: no bias
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "36:"  // Width 4: setup done
+      "cmp x20, #0x10\n"
+      "ble 39f\n"
+      "37:"  // Width 4: Multiply loop: Main loop head
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b1b3  // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b531  // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b632  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b5b3  // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      ".inst 0xa043833d  // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153bbb3  // udot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153beb1  // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be32  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      ".inst 0xa0438331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153be33  // udot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "tbnz %x[flags], #31, 38f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "38:"  // Width 4: Multiply loop: unique 7: skip row sum
+      "sub x20, x20, #0x10\n"
+      "cmp x20, #0x10\n"
+      "bgt 37b\n"
+      "39:"  // Width 4: Multiply loop: Single iteration only
+      "whilelt p0.b, XZR, x20\n"
+      "ld1rqb { z3.b }, p0/Z, [x22]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xa0408331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+      "add x22, x22, #0x10\n"
+      ".inst 0xc153b230  // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b0b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b1b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b1b3  // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
+      "addvl x25, x25, #16\n"
+      "ble 40f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153b6b0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
+      ".inst 0xa0418329  // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b531  // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b632  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
+      ".inst 0xa043832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153b5b3  // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
+      "addvl x25, x25, #16\n"
+      "ble 40f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      "subs x20, x20, #0x4\n"
+      ".inst 0xc153bab0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
+      ".inst 0xa0418325  // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153b8b1  // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
+      ".inst 0xa042832d  // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153b9b2  // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
+      ".inst 0xa043833d  // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153bbb3  // udot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
+      "addvl x25, x25, #16\n"
+      "ble 40f\n"
+      ".inst 0xa0408335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+      ".inst 0xc153beb0  // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0418335  // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
+      ".inst 0xc153beb1  // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
+      ".inst 0xa0428331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
+      ".inst 0xc153be32  // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
+      ".inst 0xa0438331  // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
+      ".inst 0xc153be33  // udot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
+      "addvl x25, x25, #16\n"
+      "40:"  // Width 4: Multiply loop: multiply skip
+      "tbnz %x[flags], #31, 41f\n"
+      "udot z26.s, z3.b, z24.b\n"
+      "41:"  // Width 4: Multiply loop: unique 8: skip row sum
+      "tbnz %x[flags], #31, 42f\n"
+      "add x20, %x[qp], %[b_offset]\n"
+      "mov x19, #0x4\n"
+      "ld1rw { z10.s }, p2/Z, [x20]\n"
+      "neg z10.s, p2/M, z10.s\n"
+      "whilelt p0.s, XZR, x19\n"
+      "uaddv d26, p0, z26.s\n"
+      "mov z26.s, z26.s[0]\n"
+      "mul z26.s, p2/M, z26.s, z10.s\n"
+      "orr %x[flags], %x[flags], #0x80000000\n"
+      "42:"  // Width 4: skip row sum fixup
+      ".inst 0xc0904b40  // addha za0.s, p2/M, p2/M, z26.s\n"
+      "add x19, %x[qp], %[per_layer_mul]\n"
+      "ld1rw { z5.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[per_layer_right_shift]\n"
+      ".inst 0xc0904b41  // addha za1.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z4.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[c_offset]\n"
+      "add x20, %x[qp], %[minval]\n"
+      ".inst 0xc0904b42  // addha za2.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z6.s }, p2/Z, [x19]\n"
+      "add x19, %x[qp], %[maxval]\n"
+      ".inst 0xc0904b43  // addha za3.s, p2/M, p2/M, z26.s\n"
+      "ld1rw { z21.s }, p2/Z, [x20]\n"
+      ".inst 0xc0062c1c  // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+      ".inst 0xc1a5ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+      ".inst 0xc0062c2c  // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+      ".inst 0xc1a5ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
+      ".inst 0xc0062c40  // mova { z0.d-z3.d }, za.d[x9, #2]\n"
+      ".inst 0xc1a5ac00  // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+      ".inst 0xc0062c68  // mova { z8.d-z11.d }, za.d[x9, #3]\n"
+      ".inst 0xc1a5ac08  // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
+      ".inst 0xc1a4aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
+      "ld1rw { z16.s }, p2/Z, [x19]\n"
+      ".inst 0xc1a4aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
+      ".inst 0xc1a4aa20  // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
+      ".inst 0xc1a4aa28  // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a6ab00  // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+      ".inst 0xc1a6ab08  // add { z8.s-z11.s }, { z8.s-z11.s }, z6.s\n"
+      ".inst 0xc1b0cebc  // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0ceac  // sclamp { z12.s-z15.s }, z21.s, z16.s\n"
+      "uzp1 z28.h, z28.h, z29.h\n"
+      ".inst 0xc1b0cea0  // sclamp { z0.s-z3.s }, z21.s, z16.s\n"
+      ".inst 0xc1b0cea8  // sclamp { z8.s-z11.s }, z21.s, z16.s\n"
+      "uzp1 z29.h, z30.h, z31.h\n"
+      "uzp1 z12.h, z12.h, z13.h\n"
+      "uzp1 z13.h, z14.h, z15.h\n"
+      "uzp1 z0.h, z0.h, z1.h\n"
+      "uzp1 z1.h, z2.h, z3.h\n"
+      "uzp1 z8.h, z8.h, z9.h\n"
+      "uzp1 z9.h, z10.h, z11.h\n"
+      "uzp1 z28.b, z28.b, z29.b\n"
+      "st1b { z28.b }, p2, [x24]\n"
+      "uzp1 z12.b, z12.b, z13.b\n"
+      "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
+      "uzp1 z0.b, z0.b, z1.b\n"
+      "uzp1 z8.b, z8.b, z9.b\n"
+      "st1b { z0.b }, p2, [x24, #2, MUL VL]\n"
+      "st1b { z8.b }, p1, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      "43:"  // Width 4: Output done
+      "subs x26, x26, #0x4\n"
+      "sub %x[N], %x[N], x27, LSL #2\n"
+      "bgt 4b\n"
+      "44:"  // Exit
+      ".inst 0xd503467f  // SMSTOP\n"
+      "ptrue p2.b\n"
+      : [N] "+&r" (N), [flags] "+&r" (flags)
+      : [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [output_ptr] "r" (output_ptr), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000..37eb63d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include "../bfloat.hpp"
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL
+{
+public:
+  typedef bfloat16 operand_type;
+  typedef float result_type;
+
+  typedef void (*kern_type)(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<float>() * 1;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<float>() * 4;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 2;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return true;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL;
+
+  StdTransformsSME<operand_type, result_type, 1, 4, 2> transforms = {};
+
+  cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000..bb8cad3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include "../../bfloat.hpp"
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const bfloat16 *const A,
+      const bfloat16 *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 2) * sizeof(bfloat16)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        n_loops(((K / 2) - 1) / 2), n_tail_iters(((K / 2) - 1) % 2),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const bfloat16 *const A;
+    const bfloat16 *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x14, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p0.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x14, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa041c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5a4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c5a4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x13, x13, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w10, [%x[args], %[offsetof_M]]\n"
+      "mov x9, #0x0\n"
+      "mov x28, #0x0\n"
+      "ldr w27, [%x[args], %[offsetof_N]]\n"
+      "ldr x26, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x25, x26\n"
+      ".inst 0x25bb6790  // whilelt pn8.s, x28, x27, VLx4\n"
+      "tbnz x14, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "fmov z21.s, #1.0\n"
+      ".inst 0xa01cc27d  // ldnt1w { z28.s-z31.s }, p8/Z, [x19, x28, LSL #2]\n"
+      ".inst 0x809c02a0  // fmopa za0.s, p0/M, p0/M, z21.s, z28.s\n"
+      ".inst 0x809d02a1  // fmopa za1.s, p0/M, p0/M, z21.s, z29.s\n"
+      ".inst 0x809e02a2  // fmopa za2.s, p0/M, p0/M, z21.s, z30.s\n"
+      ".inst 0x809f02a3  // fmopa za3.s, p0/M, p0/M, z21.s, z31.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x28\n"
+      "mov x20, x9\n"
+      "incw x19, ALL, MUL #4\n"
+      "incw x20\n"
+      "cmp x19, x27\n"
+      "csel x20, x9, x20, LT\n"
+      "mov x19, x14\n"
+      "bfm x14, XZR, #0x0, #0x0  // bfc x14, #0x0, #0x1\n"
+      "cmp x20, x10\n"
+      "csel x14, x19, x14, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x1\n"
+      "lsr x19, x19, #0x1\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x28, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      "ld1h { z0.h }, p0/Z, [x25]\n"
+      ".inst 0xa140a6db  // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x22]\n"
+      "ld1h { z13.h }, p0/Z, [x25, #1, MUL VL]\n"
+      ".inst 0xa141a6ca  // ldnt1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      "ld1h { z12.h }, p0/Z, [x25, #2, MUL VL]\n"
+      ".inst 0xa142a6cb  // ldnt1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      "ld1h { z26.h }, p0/Z, [x25, #3, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      ".inst 0xa143a6d8  // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0x81930000  // bfmopa za0.s, p0/M, p0/M, z0.h, z19.h\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0x81970001  // bfmopa za1.s, p0/M, p0/M, z0.h, z23.h\n"
+      ".inst 0x819b0002  // bfmopa za2.s, p0/M, p0/M, z0.h, z27.h\n"
+      ".inst 0x819f0003  // bfmopa za3.s, p0/M, p0/M, z0.h, z31.h\n"
+      "ld1h { z0.h }, p0/Z, [x25]\n"
+      ".inst 0x818201a0  // bfmopa za0.s, p0/M, p0/M, z13.h, z2.h\n"
+      ".inst 0xa140a6db  // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x22]\n"
+      ".inst 0x818601a1  // bfmopa za1.s, p0/M, p0/M, z13.h, z6.h\n"
+      ".inst 0x818a01a2  // bfmopa za2.s, p0/M, p0/M, z13.h, z10.h\n"
+      ".inst 0x818e01a3  // bfmopa za3.s, p0/M, p0/M, z13.h, z14.h\n"
+      "ld1h { z13.h }, p0/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x81830180  // bfmopa za0.s, p0/M, p0/M, z12.h, z3.h\n"
+      ".inst 0xa141a6ca  // ldnt1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0x81870181  // bfmopa za1.s, p0/M, p0/M, z12.h, z7.h\n"
+      ".inst 0x818b0182  // bfmopa za2.s, p0/M, p0/M, z12.h, z11.h\n"
+      ".inst 0x818f0183  // bfmopa za3.s, p0/M, p0/M, z12.h, z15.h\n"
+      "ld1h { z12.h }, p0/Z, [x25, #2, MUL VL]\n"
+      ".inst 0xa142a6cb  // ldnt1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      ".inst 0x81900340  // bfmopa za0.s, p0/M, p0/M, z26.h, z16.h\n"
+      ".inst 0x81940341  // bfmopa za1.s, p0/M, p0/M, z26.h, z20.h\n"
+      ".inst 0x81980342  // bfmopa za2.s, p0/M, p0/M, z26.h, z24.h\n"
+      ".inst 0x819c0343  // bfmopa za3.s, p0/M, p0/M, z26.h, z28.h\n"
+      "ld1h { z26.h }, p0/Z, [x25, #3, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      ".inst 0xa143a6d8  // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0x81930000  // bfmopa za0.s, p0/M, p0/M, z0.h, z19.h\n"
+      ".inst 0x81970001  // bfmopa za1.s, p0/M, p0/M, z0.h, z23.h\n"
+      ".inst 0x819b0002  // bfmopa za2.s, p0/M, p0/M, z0.h, z27.h\n"
+      ".inst 0x819f0003  // bfmopa za3.s, p0/M, p0/M, z0.h, z31.h\n"
+      ".inst 0x818201a0  // bfmopa za0.s, p0/M, p0/M, z13.h, z2.h\n"
+      ".inst 0x818601a1  // bfmopa za1.s, p0/M, p0/M, z13.h, z6.h\n"
+      ".inst 0x818a01a2  // bfmopa za2.s, p0/M, p0/M, z13.h, z10.h\n"
+      ".inst 0x818e01a3  // bfmopa za3.s, p0/M, p0/M, z13.h, z14.h\n"
+      ".inst 0x81830180  // bfmopa za0.s, p0/M, p0/M, z12.h, z3.h\n"
+      ".inst 0x81870181  // bfmopa za1.s, p0/M, p0/M, z12.h, z7.h\n"
+      ".inst 0x818b0182  // bfmopa za2.s, p0/M, p0/M, z12.h, z11.h\n"
+      ".inst 0x818f0183  // bfmopa za3.s, p0/M, p0/M, z12.h, z15.h\n"
+      ".inst 0x81900340  // bfmopa za0.s, p0/M, p0/M, z26.h, z16.h\n"
+      ".inst 0x81940341  // bfmopa za1.s, p0/M, p0/M, z26.h, z20.h\n"
+      ".inst 0x81980342  // bfmopa za2.s, p0/M, p0/M, z26.h, z24.h\n"
+      ".inst 0x819c0343  // bfmopa za3.s, p0/M, p0/M, z26.h, z28.h\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      "ld1h { z0.h }, p0/Z, [x25]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x25, x25, #1\n"
+      ".inst 0xa140a6d3  // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #4\n"
+      ".inst 0x81930000  // bfmopa za0.s, p0/M, p0/M, z0.h, z19.h\n"
+      ".inst 0x81970001  // bfmopa za1.s, p0/M, p0/M, z0.h, z23.h\n"
+      ".inst 0x819b0002  // bfmopa za2.s, p0/M, p0/M, z0.h, z27.h\n"
+      ".inst 0x819f0003  // bfmopa za3.s, p0/M, p0/M, z0.h, z31.h\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x14, #1, 14f\n"
+      "tbz x14, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
+      ".inst 0xc0860418  // mova { z24.s-z27.s }, za0h.s[x12]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc0860434  // mova { z20.s-z23.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5bc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+      ".inst 0xc0840781  // mova za1h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+      ".inst 0xc0840582  // mova za2h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa043c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c578  // st1w { z24.s-z27.s }, pn9.b, [x11]\n"
+      "addvl x13, x13, #16\n"
+      ".inst 0xa061c574  // st1w { z20.s-z23.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+      ".inst 0xa062c57c  // st1w { z28.s-z31.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+      ".inst 0xa063c570  // st1w { z16.s-z19.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+      "addvl x11, x11, #16\n"
+      "blt 11b\n"
+      "b 24f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc086043c  // mova { z28.s-z31.s }, za1h.s[x12]\n"
+      ".inst 0xa060c56c  // st1w { z12.s-z15.s }, pn9.b, [x11]\n"
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860464  // mova { z4.s-z7.s }, za3h.s[x12]\n"
+      ".inst 0xa061c57c  // st1w { z28.s-z31.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c570  // st1w { z16.s-z19.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+      ".inst 0xa063c564  // st1w { z4.s-z7.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+      "addvl x11, x11, #16\n"
+      "blt 13b\n"
+      "b 24f\n"
+      "14:"  // Store to output array
+      "ldr x24, [%x[args], %[offsetof_C]]\n"
+      "add x24, x24, x28, LSL #2\n"  // C += n
+      "sub x23, x10, x9\n"
+      "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x24, x9, x22, x24\n"  // C += m * ldc
+      "tbz x14, #2, 18f\n"
+      "cntw x19\n"
+      "cmp x23, x19\n"
+      "csel x21, x23, x19, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Skip activation: Accumulator row 0 loop
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa160c303  // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Skip activation: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 17f\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "17:"  // Store to output array: Skip activation: Accumulator row 0 oddments: End
+      "subs x23, x23, x21\n"
+      "beq 18f\n"
+      "b 22f\n"
+      "18:"  // Store to output array: Skip activation: End
+      "cntw x19\n"
+      "cmp x23, x19\n"
+      "ld1rw { z23.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+      "csel x19, x23, x19, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "ld1rw { z16.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 20f\n"
+      "19:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc1b0cae0  // fclamp { z0.s-z3.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0cae4  // fclamp { z4.s-z7.s }, z23.s, z16.s\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xc1b0cae8  // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0caec  // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      ".inst 0xa160c303  // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "blt 19b\n"
+      "20:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 21f\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc1b0cae0  // fclamp { z0.s-z3.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0cae4  // fclamp { z4.s-z7.s }, z23.s, z16.s\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xc1b0cae8  // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0caec  // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 21f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 21f\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "21:"  // Store to output array: Accumulator row 0 oddments: End
+      "22:"  // Store to output array: End
+      "tbz x14, #0, 24f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "23:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c5a8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+      ".inst 0xc0840503  // mova za3h.s[x12], { z8.s-z11.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x13, x13, #16\n"
+      "blt 23b\n"
+      "24:"  // End block
+      "incw x28, ALL, MUL #4\n"
+      "cmp x28, x27\n"
+      "blt 3b\n"
+      "incw x9\n"
+      "cmp x9, x10\n"
+      "mov x28, #0x0\n"
+      "mov x26, x25\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000..89c79cf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include "../bfloat.hpp"
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL
+{
+public:
+  typedef bfloat16 operand_type;
+  typedef float result_type;
+
+  typedef void (*kern_type)(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<float>() * 2;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<float>() * 2;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 2;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return true;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL;
+
+  StdTransformsSME<operand_type, result_type, 2, 2, 2> transforms = {};
+
+  cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000..a4a40ad
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include "../../bfloat.hpp"
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const bfloat16 *const A,
+      const bfloat16 *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 2) * sizeof(bfloat16)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        n_loops(((K / 2) - 1) / 2), n_tail_iters(((K / 2) - 1) % 2),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const bfloat16 *const A;
+    const bfloat16 *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p0.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5c8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840500  // mova za0h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840783  // mova za3h.s[x12], { z28.s-z31.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      ".inst 0x25bc4530  // whilelt pn8.s, x9, x28, VLx2\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "fmov z21.s, #1.0\n"
+      ".inst 0xa009426f  // ldnt1w { z14.s-z15.s }, p8/Z, [x19, x9, LSL #2]\n"
+      ".inst 0x808e02a0  // fmopa za0.s, p0/M, p0/M, z21.s, z14.s\n"
+      ".inst 0x808f02a1  // fmopa za1.s, p0/M, p0/M, z21.s, z15.s\n"
+      ".inst 0x808e02a2  // fmopa za2.s, p0/M, p0/M, z21.s, z14.s\n"
+      ".inst 0x808f02a3  // fmopa za3.s, p0/M, p0/M, z21.s, z15.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19, ALL, MUL #2\n"
+      "incw x20, ALL, MUL #2\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x1\n"
+      "lsr x19, x19, #0x1\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1402747  // ld1h { z7.h, z15.h }, pn9.b/Z, [x26]\n"
+      ".inst 0xa14026df  // ldnt1h { z23.h, z31.h }, pn9.b/Z, [x22]\n"
+      ".inst 0xa0412748  // ld1h { z8.h-z9.h }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa04126c3  // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa1422752  // ld1h { z18.h, z26.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa04226d1  // ldnt1h { z16.h-z17.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa1432756  // ld1h { z22.h, z30.h }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14326cc  // ldnt1h { z4.h, z12.h }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0x819700e0  // bfmopa za0.s, p0/M, p0/M, z7.h, z23.h\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0x819f00e1  // bfmopa za1.s, p0/M, p0/M, z7.h, z31.h\n"
+      ".inst 0x819701e2  // bfmopa za2.s, p0/M, p0/M, z15.h, z23.h\n"
+      ".inst 0x819f01e3  // bfmopa za3.s, p0/M, p0/M, z15.h, z31.h\n"
+      ".inst 0xa1402747  // ld1h { z7.h, z15.h }, pn9.b/Z, [x26]\n"
+      ".inst 0x81820100  // bfmopa za0.s, p0/M, p0/M, z8.h, z2.h\n"
+      ".inst 0xa14026df  // ldnt1h { z23.h, z31.h }, pn9.b/Z, [x22]\n"
+      ".inst 0x81830101  // bfmopa za1.s, p0/M, p0/M, z8.h, z3.h\n"
+      ".inst 0x81820122  // bfmopa za2.s, p0/M, p0/M, z9.h, z2.h\n"
+      ".inst 0x81830123  // bfmopa za3.s, p0/M, p0/M, z9.h, z3.h\n"
+      ".inst 0xa0412748  // ld1h { z8.h-z9.h }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0x81900240  // bfmopa za0.s, p0/M, p0/M, z18.h, z16.h\n"
+      ".inst 0xa04126c3  // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0x81910241  // bfmopa za1.s, p0/M, p0/M, z18.h, z17.h\n"
+      ".inst 0x81900342  // bfmopa za2.s, p0/M, p0/M, z26.h, z16.h\n"
+      ".inst 0x81910343  // bfmopa za3.s, p0/M, p0/M, z26.h, z17.h\n"
+      ".inst 0xa1422752  // ld1h { z18.h, z26.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa04226d1  // ldnt1h { z16.h-z17.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0x818402c0  // bfmopa za0.s, p0/M, p0/M, z22.h, z4.h\n"
+      ".inst 0x818c02c1  // bfmopa za1.s, p0/M, p0/M, z22.h, z12.h\n"
+      ".inst 0x818403c2  // bfmopa za2.s, p0/M, p0/M, z30.h, z4.h\n"
+      ".inst 0x818c03c3  // bfmopa za3.s, p0/M, p0/M, z30.h, z12.h\n"
+      ".inst 0xa1432756  // ld1h { z22.h, z30.h }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14326cc  // ldnt1h { z4.h, z12.h }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0x819700e0  // bfmopa za0.s, p0/M, p0/M, z7.h, z23.h\n"
+      ".inst 0x819f00e1  // bfmopa za1.s, p0/M, p0/M, z7.h, z31.h\n"
+      ".inst 0x819701e2  // bfmopa za2.s, p0/M, p0/M, z15.h, z23.h\n"
+      ".inst 0x819f01e3  // bfmopa za3.s, p0/M, p0/M, z15.h, z31.h\n"
+      ".inst 0x81820100  // bfmopa za0.s, p0/M, p0/M, z8.h, z2.h\n"
+      ".inst 0x81830101  // bfmopa za1.s, p0/M, p0/M, z8.h, z3.h\n"
+      ".inst 0x81820122  // bfmopa za2.s, p0/M, p0/M, z9.h, z2.h\n"
+      ".inst 0x81830123  // bfmopa za3.s, p0/M, p0/M, z9.h, z3.h\n"
+      ".inst 0x81900240  // bfmopa za0.s, p0/M, p0/M, z18.h, z16.h\n"
+      ".inst 0x81910241  // bfmopa za1.s, p0/M, p0/M, z18.h, z17.h\n"
+      ".inst 0x81900342  // bfmopa za2.s, p0/M, p0/M, z26.h, z16.h\n"
+      ".inst 0x81910343  // bfmopa za3.s, p0/M, p0/M, z26.h, z17.h\n"
+      ".inst 0x818402c0  // bfmopa za0.s, p0/M, p0/M, z22.h, z4.h\n"
+      ".inst 0x818c02c1  // bfmopa za1.s, p0/M, p0/M, z22.h, z12.h\n"
+      ".inst 0x818403c2  // bfmopa za2.s, p0/M, p0/M, z30.h, z4.h\n"
+      ".inst 0x818c03c3  // bfmopa za3.s, p0/M, p0/M, z30.h, z12.h\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa1402747  // ld1h { z7.h, z15.h }, pn9.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xa14026d7  // ld1h { z23.h, z31.h }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #2\n"
+      ".inst 0x819700e0  // bfmopa za0.s, p0/M, p0/M, z7.h, z23.h\n"
+      ".inst 0x819f00e1  // bfmopa za1.s, p0/M, p0/M, z7.h, z31.h\n"
+      ".inst 0x819701e2  // bfmopa za2.s, p0/M, p0/M, z15.h, z23.h\n"
+      ".inst 0x819f01e3  // bfmopa za3.s, p0/M, p0/M, z15.h, z31.h\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840401  // mova za1h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc0860468  // mova { z8.s-z11.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840702  // mova za2h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa043c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840703  // mova za3h.s[x12], { z24.s-z27.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c5b4  // st1w { z20.s-z23.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 30f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      ".inst 0xa060c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xa061c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c5a4  // st1w { z4.s-z7.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5a0  // st1w { z0.s-z3.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 30f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9, LSL #2\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "tbz x15, #2, 21f\n"
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Skip activation: Accumulator row 0 loop
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604327  // st1w { z7.s, z15.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Skip activation: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "17:"  // Store to output array: Skip activation: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 21f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 19f\n"
+      "18:"  // Store to output array: Skip activation: Accumulator row 1 loop
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604327  // st1w { z7.s, z15.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 18b\n"
+      "19:"  // Store to output array: Skip activation: Accumulator row 1 oddments
+      "cbz x19, 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc086047c  // mova { z28.s-z31.s }, za3h.s[x12]\n"
+      ".inst 0xa1604334  // st1w { z20.s, z28.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604335  // st1w { z21.s, z29.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      ".inst 0xa1604336  // st1w { z22.s, z30.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "20:"  // Store to output array: Skip activation: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 21f\n"
+      "b 28f\n"
+      "21:"  // Store to output array: Skip activation: End
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "ld1rw { z21.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "ld1rw { z20.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 23f\n"
+      "22:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xc1b4caa4  // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4caac  // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604327  // st1w { z7.s, z15.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 22b\n"
+      "23:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 24f\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xc1b4caa0  // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4caa8  // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604320  // st1w { z0.s, z8.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604321  // st1w { z1.s, z9.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      ".inst 0xa1604322  // st1w { z2.s, z10.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "24:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 26f\n"
+      "25:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xc1b4cab0  // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4cab8  // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
+      ".inst 0xa1604330  // st1w { z16.s, z24.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604331  // st1w { z17.s, z25.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604332  // st1w { z18.s, z26.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604333  // st1w { z19.s, z27.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 25b\n"
+      "26:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 27f\n"
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xc1b4cab0  // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4cab8  // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604330  // st1w { z16.s, z24.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604331  // st1w { z17.s, z25.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      ".inst 0xa1604332  // st1w { z18.s, z26.s }, p8, [x25]\n"
+      "27:"  // Store to output array: Accumulator row 1 oddments: End
+      "28:"  // Store to output array: End
+      "tbz x15, #0, 30f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "29:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c5c8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840503  // mova za3h.s[x12], { z8.s-z11.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 29b\n"
+      "30:"  // End block
+      "incw x9, ALL, MUL #2\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #2\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000..0d407e0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include "../bfloat.hpp"
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL
+{
+public:
+  typedef bfloat16 operand_type;
+  typedef float result_type;
+
+  typedef void (*kern_type)(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<float>() * 4;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<float>() * 1;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 2;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return true;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL;
+
+  StdTransformsSME<operand_type, result_type, 4, 1, 2> transforms = {};
+
+  cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000..798a3cb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include "../../bfloat.hpp"
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const bfloat16 *const A,
+      const bfloat16 *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 2) * sizeof(bfloat16)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        n_loops(((K / 2) - 1) / 2), n_tail_iters(((K / 2) - 1) % 2),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const bfloat16 *const A;
+    const bfloat16 *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840480  // mova za0h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa041c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa042c1c0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      "whilelt p0.s, x9, x28\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "fmov z8.s, #1.0\n"
+      "ldnt1w { z27.s }, p0/Z, [x19, x9, LSL #2]\n"
+      ".inst 0x809b2500  // fmopa za0.s, p1/M, p1/M, z8.s, z27.s\n"
+      ".inst 0x809b2501  // fmopa za1.s, p1/M, p1/M, z8.s, z27.s\n"
+      ".inst 0x809b2502  // fmopa za2.s, p1/M, p1/M, z8.s, z27.s\n"
+      ".inst 0x809b2503  // fmopa za3.s, p1/M, p1/M, z8.s, z27.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19\n"
+      "incw x20, ALL, MUL #4\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x1\n"
+      "lsr x19, x19, #0x1\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa040a344  // ld1h { z4.h-z7.h }, pn8.b/Z, [x26]\n"
+      "ldnt1h { z29.h }, p1/Z, [x22]\n"
+      ".inst 0xa041a34c  // ld1h { z12.h-z15.h }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      "ldnt1h { z23.h }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa042a340  // ld1h { z0.h-z3.h }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1h { z21.h }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa143a352  // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0x819d2480  // bfmopa za0.s, p1/M, p1/M, z4.h, z29.h\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0x819d24a1  // bfmopa za1.s, p1/M, p1/M, z5.h, z29.h\n"
+      ".inst 0x819d24c2  // bfmopa za2.s, p1/M, p1/M, z6.h, z29.h\n"
+      ".inst 0x819d24e3  // bfmopa za3.s, p1/M, p1/M, z7.h, z29.h\n"
+      ".inst 0xa040a344  // ld1h { z4.h-z7.h }, pn8.b/Z, [x26]\n"
+      ".inst 0x81972580  // bfmopa za0.s, p1/M, p1/M, z12.h, z23.h\n"
+      "ldnt1h { z29.h }, p1/Z, [x22]\n"
+      ".inst 0x819725a1  // bfmopa za1.s, p1/M, p1/M, z13.h, z23.h\n"
+      ".inst 0x819725c2  // bfmopa za2.s, p1/M, p1/M, z14.h, z23.h\n"
+      ".inst 0x819725e3  // bfmopa za3.s, p1/M, p1/M, z15.h, z23.h\n"
+      ".inst 0xa041a34c  // ld1h { z12.h-z15.h }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0x81952400  // bfmopa za0.s, p1/M, p1/M, z0.h, z21.h\n"
+      "ldnt1h { z23.h }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0x81952421  // bfmopa za1.s, p1/M, p1/M, z1.h, z21.h\n"
+      ".inst 0x81952442  // bfmopa za2.s, p1/M, p1/M, z2.h, z21.h\n"
+      ".inst 0x81952463  // bfmopa za3.s, p1/M, p1/M, z3.h, z21.h\n"
+      ".inst 0xa042a340  // ld1h { z0.h-z3.h }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1h { z21.h }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0x819b2640  // bfmopa za0.s, p1/M, p1/M, z18.h, z27.h\n"
+      ".inst 0x819b26c1  // bfmopa za1.s, p1/M, p1/M, z22.h, z27.h\n"
+      ".inst 0x819b2742  // bfmopa za2.s, p1/M, p1/M, z26.h, z27.h\n"
+      ".inst 0x819b27c3  // bfmopa za3.s, p1/M, p1/M, z30.h, z27.h\n"
+      ".inst 0xa143a352  // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0x819d2480  // bfmopa za0.s, p1/M, p1/M, z4.h, z29.h\n"
+      ".inst 0x819d24a1  // bfmopa za1.s, p1/M, p1/M, z5.h, z29.h\n"
+      ".inst 0x819d24c2  // bfmopa za2.s, p1/M, p1/M, z6.h, z29.h\n"
+      ".inst 0x819d24e3  // bfmopa za3.s, p1/M, p1/M, z7.h, z29.h\n"
+      ".inst 0x81972580  // bfmopa za0.s, p1/M, p1/M, z12.h, z23.h\n"
+      ".inst 0x819725a1  // bfmopa za1.s, p1/M, p1/M, z13.h, z23.h\n"
+      ".inst 0x819725c2  // bfmopa za2.s, p1/M, p1/M, z14.h, z23.h\n"
+      ".inst 0x819725e3  // bfmopa za3.s, p1/M, p1/M, z15.h, z23.h\n"
+      ".inst 0x81952400  // bfmopa za0.s, p1/M, p1/M, z0.h, z21.h\n"
+      ".inst 0x81952421  // bfmopa za1.s, p1/M, p1/M, z1.h, z21.h\n"
+      ".inst 0x81952442  // bfmopa za2.s, p1/M, p1/M, z2.h, z21.h\n"
+      ".inst 0x81952463  // bfmopa za3.s, p1/M, p1/M, z3.h, z21.h\n"
+      ".inst 0x819b2640  // bfmopa za0.s, p1/M, p1/M, z18.h, z27.h\n"
+      ".inst 0x819b26c1  // bfmopa za1.s, p1/M, p1/M, z22.h, z27.h\n"
+      ".inst 0x819b2742  // bfmopa za2.s, p1/M, p1/M, z26.h, z27.h\n"
+      ".inst 0x819b27c3  // bfmopa za3.s, p1/M, p1/M, z30.h, z27.h\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa040a344  // ld1h { z4.h-z7.h }, pn8.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #4\n"
+      "ld1h { z29.h }, p1/Z, [x22]\n"
+      "addvl x22, x22, #1\n"
+      ".inst 0x819d2480  // bfmopa za0.s, p1/M, p1/M, z4.h, z29.h\n"
+      ".inst 0x819d24a1  // bfmopa za1.s, p1/M, p1/M, z5.h, z29.h\n"
+      ".inst 0x819d24c2  // bfmopa za2.s, p1/M, p1/M, z6.h, z29.h\n"
+      ".inst 0x819d24e3  // bfmopa za3.s, p1/M, p1/M, z7.h, z29.h\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c1c8  // ld1w { z8.s-z11.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0860418  // mova { z24.s-z27.s }, za0h.s[x12]\n"
+      ".inst 0xc0840500  // mova za0h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xa041c1cc  // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xa042c1c8  // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840502  // mova za2h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xa043c1dc  // ld1w { z28.s-z31.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840783  // mova za3h.s[x12], { z28.s-z31.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c1b8  // st1w { z24.s-z27.s }, pn8.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c1a4  // st1w { z4.s-z7.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1a0  // st1w { z0.s-z3.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 42f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc0860438  // mova { z24.s-z27.s }, za1h.s[x12]\n"
+      ".inst 0xa060c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13]\n"
+      ".inst 0xc0860440  // mova { z0.s-z3.s }, za2h.s[x12]\n"
+      ".inst 0xc0860468  // mova { z8.s-z11.s }, za3h.s[x12]\n"
+      ".inst 0xa061c1b8  // st1w { z24.s-z27.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c1a0  // st1w { z0.s-z3.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1a8  // st1w { z8.s-z11.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 42f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9, LSL #2\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "tbz x15, #2, 27f\n"
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Skip activation: Accumulator row 0 loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Skip activation: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      "st1w { z4.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z5.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      "st1w { z6.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "17:"  // Store to output array: Skip activation: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 19f\n"
+      "18:"  // Store to output array: Skip activation: Accumulator row 1 loop
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 18b\n"
+      "19:"  // Store to output array: Skip activation: Accumulator row 1 oddments
+      "cbz x19, 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      "st1w { z4.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z5.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      "st1w { z6.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "20:"  // Store to output array: Skip activation: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 22f\n"
+      "21:"  // Store to output array: Skip activation: Accumulator row 2 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 21b\n"
+      "22:"  // Store to output array: Skip activation: Accumulator row 2 oddments
+      "cbz x19, 23f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      "st1w { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 23f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 23f\n"
+      "st1w { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "23:"  // Store to output array: Skip activation: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 25f\n"
+      "24:"  // Store to output array: Skip activation: Accumulator row 3 loop
+      ".inst 0xc0860464  // mova { z4.s-z7.s }, za3h.s[x12]\n"
+      "st1w { z4.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z5.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z6.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z7.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 24b\n"
+      "25:"  // Store to output array: Skip activation: Accumulator row 3 oddments
+      "cbz x19, 26f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      "st1w { z12.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 26f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z13.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 26f\n"
+      "st1w { z14.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "26:"  // Store to output array: Skip activation: Accumulator row 3 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "b 40f\n"
+      "27:"  // Store to output array: Skip activation: End
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "ld1rw { z25.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "ld1rw { z24.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 29f\n"
+      "28:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860414  // mova { z20.s-z23.s }, za0h.s[x12]\n"
+      ".inst 0xc1b8cb34  // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
+      "st1w { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z23.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 28b\n"
+      "29:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 30f\n"
+      ".inst 0xc0860408  // mova { z8.s-z11.s }, za0h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb28  // fclamp { z8.s-z11.s }, z25.s, z24.s\n"
+      "st1w { z8.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 30f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z9.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 30f\n"
+      "st1w { z10.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "30:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 40f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 32f\n"
+      "31:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 31b\n"
+      "32:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 33f\n"
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 33f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 33f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "33:"  // Store to output array: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 40f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 35f\n"
+      "34:"  // Store to output array: Accumulator row 2 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 34b\n"
+      "35:"  // Store to output array: Accumulator row 2 oddments
+      "cbz x19, 36f\n"
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 36f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 36f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "36:"  // Store to output array: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 40f\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 38f\n"
+      "37:"  // Store to output array: Accumulator row 3 loop
+      ".inst 0xc0860474  // mova { z20.s-z23.s }, za3h.s[x12]\n"
+      ".inst 0xc1b8cb34  // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
+      "st1w { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z23.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 37b\n"
+      "38:"  // Store to output array: Accumulator row 3 oddments
+      "cbz x19, 39f\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 39f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 39f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "39:"  // Store to output array: Accumulator row 3 oddments: End
+      "40:"  // Store to output array: End
+      "tbz x15, #0, 42f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "41:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840682  // mova za2h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 41b\n"
+      "42:"  // End block
+      "incw x9\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #4\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000..7777349
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL
+{
+public:
+  typedef float operand_type;
+  typedef float result_type;
+
+  typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<float>() * 1;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<float>() * 4;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 1;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return true;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_fp32_mopa_1VLx4VL;
+
+  StdTransformsSME<operand_type, result_type, 1, 4, 1> transforms = {};
+
+  cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
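
Note on the blocking parameters above: out_height() and out_width() scale with the SME vector length, so this variant tiles the GEMM in blocks of 1 VL of rows by 4 VL of columns. Below is a minimal, self-contained sketch of how a caller could turn those numbers into tile counts; the values 16 and 64 assume a 512-bit SVL (16 fp32 lanes, so 1VL x 4VL gives 16 x 64), and tile_count is a hypothetical helper, not a library function.

// Illustrative sketch (not part of the patch).
#include <cstdio>

static unsigned int tile_count(unsigned int extent, unsigned int block)
{
    return (extent + block - 1) / block;  // ceiling division
}

int main()
{
    const unsigned int M = 100, N = 200;
    const unsigned int out_height = 16;  // 1 VL of fp32 on a 512-bit SVL
    const unsigned int out_width  = 64;  // 4 VL of fp32 on a 512-bit SVL
    std::printf("M tiles: %u, N tiles: %u\n",
                tile_count(M, out_height), tile_count(N, out_width));
    return 0;
}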
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000..4f6d9a3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const float *const A,
+      const float *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(K * sizeof(float)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        n_loops((K - 1) / 2), n_tail_iters((K - 1) % 2),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const float *const A;
+    const float *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x14, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p0.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x14, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa041c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5a4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c5a4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x13, x13, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w10, [%x[args], %[offsetof_M]]\n"
+      "mov x9, #0x0\n"
+      "mov x28, #0x0\n"
+      "ldr w27, [%x[args], %[offsetof_N]]\n"
+      "ldr x26, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x25, x26\n"
+      ".inst 0x25bb6790  // whilelt pn8.s, x28, x27, VLx4\n"
+      "tbnz x14, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "fmov z21.s, #1.0\n"
+      ".inst 0xa01cc27d  // ldnt1w { z28.s-z31.s }, p8/Z, [x19, x28, LSL #2]\n"
+      ".inst 0x809c02a0  // fmopa za0.s, p0/M, p0/M, z21.s, z28.s\n"
+      ".inst 0x809d02a1  // fmopa za1.s, p0/M, p0/M, z21.s, z29.s\n"
+      ".inst 0x809e02a2  // fmopa za2.s, p0/M, p0/M, z21.s, z30.s\n"
+      ".inst 0x809f02a3  // fmopa za3.s, p0/M, p0/M, z21.s, z31.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x28\n"
+      "mov x20, x9\n"
+      "incw x19, ALL, MUL #4\n"
+      "incw x20\n"
+      "cmp x19, x27\n"
+      "csel x20, x9, x20, LT\n"
+      "mov x19, x14\n"
+      "bfm x14, XZR, #0x0, #0x0  // bfc x14, #0x0, #0x1\n"
+      "cmp x20, x10\n"
+      "csel x14, x19, x14, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "lsr x22, x19, #0x2\n"
+      "and x21, x19, #0x3\n"
+      "ldr x20, [%x[args], %[offsetof_B]]\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x20, x28, x19, x20\n"  // bptr = B + n * kstride_bytes
+      "cbz x22, 8f\n"
+      "subs x22, x22, #0x1\n"
+      "ld1w { z0.s }, p0/Z, [x25]\n"
+      ".inst 0xa140c69b  // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x20]\n"
+      "ld1w { z13.s }, p0/Z, [x25, #1, MUL VL]\n"
+      ".inst 0xa141c68a  // ldnt1w { z2.s, z6.s, z10.s, z14.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
+      "ld1w { z12.s }, p0/Z, [x25, #2, MUL VL]\n"
+      ".inst 0xa142c68b  // ldnt1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x20, #0x8, MUL VL]\n"
+      "ld1w { z26.s }, p0/Z, [x25, #3, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      ".inst 0xa143c698  // ldnt1w { z16.s, z20.s, z24.s, z28.s }, pn9.b/Z, [x20, #0xc, MUL VL]\n"
+      "addvl x20, x20, #16\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0x80930000  // fmopa za0.s, p0/M, p0/M, z0.s, z19.s\n"
+      "subs x22, x22, #0x1\n"
+      ".inst 0x80970001  // fmopa za1.s, p0/M, p0/M, z0.s, z23.s\n"
+      ".inst 0x809b0002  // fmopa za2.s, p0/M, p0/M, z0.s, z27.s\n"
+      ".inst 0x809f0003  // fmopa za3.s, p0/M, p0/M, z0.s, z31.s\n"
+      "ld1w { z0.s }, p0/Z, [x25]\n"
+      ".inst 0x808201a0  // fmopa za0.s, p0/M, p0/M, z13.s, z2.s\n"
+      ".inst 0xa140c69b  // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x20]\n"
+      ".inst 0x808601a1  // fmopa za1.s, p0/M, p0/M, z13.s, z6.s\n"
+      ".inst 0x808a01a2  // fmopa za2.s, p0/M, p0/M, z13.s, z10.s\n"
+      ".inst 0x808e01a3  // fmopa za3.s, p0/M, p0/M, z13.s, z14.s\n"
+      "ld1w { z13.s }, p0/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x80830180  // fmopa za0.s, p0/M, p0/M, z12.s, z3.s\n"
+      ".inst 0xa141c68a  // ldnt1w { z2.s, z6.s, z10.s, z14.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
+      ".inst 0x80870181  // fmopa za1.s, p0/M, p0/M, z12.s, z7.s\n"
+      ".inst 0x808b0182  // fmopa za2.s, p0/M, p0/M, z12.s, z11.s\n"
+      ".inst 0x808f0183  // fmopa za3.s, p0/M, p0/M, z12.s, z15.s\n"
+      "ld1w { z12.s }, p0/Z, [x25, #2, MUL VL]\n"
+      ".inst 0xa142c68b  // ldnt1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x20, #0x8, MUL VL]\n"
+      ".inst 0x80900340  // fmopa za0.s, p0/M, p0/M, z26.s, z16.s\n"
+      ".inst 0x80940341  // fmopa za1.s, p0/M, p0/M, z26.s, z20.s\n"
+      ".inst 0x80980342  // fmopa za2.s, p0/M, p0/M, z26.s, z24.s\n"
+      ".inst 0x809c0343  // fmopa za3.s, p0/M, p0/M, z26.s, z28.s\n"
+      "ld1w { z26.s }, p0/Z, [x25, #3, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      ".inst 0xa143c698  // ldnt1w { z16.s, z20.s, z24.s, z28.s }, pn9.b/Z, [x20, #0xc, MUL VL]\n"
+      "addvl x20, x20, #16\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0x80930000  // fmopa za0.s, p0/M, p0/M, z0.s, z19.s\n"
+      ".inst 0x80970001  // fmopa za1.s, p0/M, p0/M, z0.s, z23.s\n"
+      ".inst 0x809b0002  // fmopa za2.s, p0/M, p0/M, z0.s, z27.s\n"
+      ".inst 0x809f0003  // fmopa za3.s, p0/M, p0/M, z0.s, z31.s\n"
+      ".inst 0x808201a0  // fmopa za0.s, p0/M, p0/M, z13.s, z2.s\n"
+      ".inst 0x808601a1  // fmopa za1.s, p0/M, p0/M, z13.s, z6.s\n"
+      ".inst 0x808a01a2  // fmopa za2.s, p0/M, p0/M, z13.s, z10.s\n"
+      ".inst 0x808e01a3  // fmopa za3.s, p0/M, p0/M, z13.s, z14.s\n"
+      ".inst 0x80830180  // fmopa za0.s, p0/M, p0/M, z12.s, z3.s\n"
+      ".inst 0x80870181  // fmopa za1.s, p0/M, p0/M, z12.s, z7.s\n"
+      ".inst 0x808b0182  // fmopa za2.s, p0/M, p0/M, z12.s, z11.s\n"
+      ".inst 0x808f0183  // fmopa za3.s, p0/M, p0/M, z12.s, z15.s\n"
+      ".inst 0x80900340  // fmopa za0.s, p0/M, p0/M, z26.s, z16.s\n"
+      ".inst 0x80940341  // fmopa za1.s, p0/M, p0/M, z26.s, z20.s\n"
+      ".inst 0x80980342  // fmopa za2.s, p0/M, p0/M, z26.s, z24.s\n"
+      ".inst 0x809c0343  // fmopa za3.s, p0/M, p0/M, z26.s, z28.s\n"
+      "8:"  // K oddments
+      "cbz x21, 10f\n"
+      "9:"  // K oddments: Loop
+      "ld1w { z0.s }, p0/Z, [x25]\n"
+      "subs x21, x21, #0x1\n"
+      "addvl x25, x25, #1\n"
+      ".inst 0xa140c693  // ld1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x20]\n"
+      "addvl x20, x20, #4\n"
+      ".inst 0x80930000  // fmopa za0.s, p0/M, p0/M, z0.s, z19.s\n"
+      ".inst 0x80970001  // fmopa za1.s, p0/M, p0/M, z0.s, z23.s\n"
+      ".inst 0x809b0002  // fmopa za2.s, p0/M, p0/M, z0.s, z27.s\n"
+      ".inst 0x809f0003  // fmopa za3.s, p0/M, p0/M, z0.s, z31.s\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x14, #1, 14f\n"
+      "tbz x14, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
+      ".inst 0xc0860418  // mova { z24.s-z27.s }, za0h.s[x12]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc0860434  // mova { z20.s-z23.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5bc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+      ".inst 0xc0840781  // mova za1h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+      ".inst 0xc0840582  // mova za2h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa043c5ac  // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c578  // st1w { z24.s-z27.s }, pn9.b, [x11]\n"
+      "addvl x13, x13, #16\n"
+      ".inst 0xa061c574  // st1w { z20.s-z23.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+      ".inst 0xa062c57c  // st1w { z28.s-z31.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+      ".inst 0xa063c570  // st1w { z16.s-z19.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+      "addvl x11, x11, #16\n"
+      "blt 11b\n"
+      "b 24f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc086043c  // mova { z28.s-z31.s }, za1h.s[x12]\n"
+      ".inst 0xa060c56c  // st1w { z12.s-z15.s }, pn9.b, [x11]\n"
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860464  // mova { z4.s-z7.s }, za3h.s[x12]\n"
+      ".inst 0xa061c57c  // st1w { z28.s-z31.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c570  // st1w { z16.s-z19.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+      ".inst 0xa063c564  // st1w { z4.s-z7.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+      "addvl x11, x11, #16\n"
+      "blt 13b\n"
+      "b 24f\n"
+      "14:"  // Store to output array
+      "ldr x24, [%x[args], %[offsetof_C]]\n"
+      "add x24, x24, x28, LSL #2\n"  // C += n
+      "sub x23, x10, x9\n"
+      "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x24, x9, x22, x24\n"  // C += m * ldc
+      "tbz x14, #2, 18f\n"
+      "cntw x19\n"
+      "cmp x23, x19\n"
+      "csel x21, x23, x19, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Skip activation: Accumulator row 0 loop
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa160c303  // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Skip activation: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 17f\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "17:"  // Store to output array: Skip activation: Accumulator row 0 oddments: End
+      "subs x23, x23, x21\n"
+      "beq 18f\n"
+      "b 22f\n"
+      "18:"  // Store to output array: Skip activation: End
+      "cntw x19\n"
+      "cmp x23, x19\n"
+      "ld1rw { z23.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+      "csel x19, x23, x19, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "ld1rw { z16.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 20f\n"
+      "19:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc1b0cae0  // fclamp { z0.s-z3.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0cae4  // fclamp { z4.s-z7.s }, z23.s, z16.s\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xc1b0cae8  // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0caec  // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      ".inst 0xa160c303  // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "blt 19b\n"
+      "20:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 21f\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc1b0cae0  // fclamp { z0.s-z3.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0cae4  // fclamp { z4.s-z7.s }, z23.s, z16.s\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xc1b0cae8  // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
+      ".inst 0xc1b0caec  // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c300  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 21f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c301  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+      "add x24, x24, x22\n"
+      "beq 21f\n"
+      ".inst 0xa160c302  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+      "21:"  // Store to output array: Accumulator row 0 oddments: End
+      "22:"  // Store to output array: End
+      "tbz x14, #0, 24f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "23:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5b0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c5a8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+      ".inst 0xc0840503  // mova za3h.s[x12], { z8.s-z11.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x13, x13, #16\n"
+      "blt 23b\n"
+      "24:"  // End block
+      "incw x28, ALL, MUL #4\n"
+      "cmp x28, x27\n"
+      "blt 3b\n"
+      "incw x9\n"
+      "cmp x9, x10\n"
+      "mov x28, #0x0\n"
+      "mov x26, x25\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
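
The KernelArgs constructor above packs the run-time behaviour into a small flags word (bit 0: reload accumulators from the buffer, bit 1: store accumulators only because C is null, bit 2: skip the activation clamp) and folds the activation into a [min, max] pair that the assembly applies with fclamp. A stand-alone sketch of the same logic follows; Act, Setup and make_setup are stand-in names for illustration, not library types.

// Illustrative sketch (not part of the patch): mirrors the KernelArgs constructor above.
#include <algorithm>
#include <cstdint>
#include <limits>

enum class Act { None, ReLU, BoundedReLU };

struct Setup {
    uint64_t flags = 0;
    float    min   = -std::numeric_limits<float>::infinity();
    float    max   =  std::numeric_limits<float>::infinity();
};

Setup make_setup(bool accumulate, bool store_only, Act act, float bound)
{
    Setup s;
    if (accumulate)       s.flags |= 1u << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
    if (store_only)       s.flags |= 1u << 1;  // STORE_ACCUMULATORS_TO_BUFFER
    if (act == Act::None) s.flags |= 1u << 2;  // SKIP_ACTIVATION
    if (act == Act::BoundedReLU) s.max = bound;  // BoundedReLU also clamps below, like ReLU
    if (act != Act::None)        s.min = 0.0f;
    return s;
}

// Per element, the SME2 fclamp used in the store loops behaves like:
inline float activate(float v, const Setup &s)
{
    return std::clamp(v, s.min, s.max);
}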
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000..51e8c43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL
+{
+public:
+  typedef float operand_type;
+  typedef float result_type;
+
+  typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<float>() * 2;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<float>() * 2;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 1;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return true;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_fp32_mopa_2VLx2VL;
+
+  StdTransformsSME<operand_type, result_type, 2, 2, 1> transforms = {};
+
+  cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000..344215b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const float *const A,
+      const float *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(K * sizeof(float)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        n_loops((K - 1) / 2), n_tail_iters((K - 1) % 2),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const float *const A;
+    const float *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p0.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5c8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840500  // mova za0h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840783  // mova za3h.s[x12], { z28.s-z31.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      ".inst 0x25bc4530  // whilelt pn8.s, x9, x28, VLx2\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "fmov z21.s, #1.0\n"
+      ".inst 0xa009426f  // ldnt1w { z14.s-z15.s }, p8/Z, [x19, x9, LSL #2]\n"
+      ".inst 0x808e02a0  // fmopa za0.s, p0/M, p0/M, z21.s, z14.s\n"
+      ".inst 0x808f02a1  // fmopa za1.s, p0/M, p0/M, z21.s, z15.s\n"
+      ".inst 0x808e02a2  // fmopa za2.s, p0/M, p0/M, z21.s, z14.s\n"
+      ".inst 0x808f02a3  // fmopa za3.s, p0/M, p0/M, z21.s, z15.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19, ALL, MUL #2\n"
+      "incw x20, ALL, MUL #2\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "lsr x22, x19, #0x2\n"
+      "and x21, x19, #0x3\n"
+      "ldr x20, [%x[args], %[offsetof_B]]\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x20, x9, x19, x20\n"  // bptr = B + n * kstride_bytes
+      "cbz x22, 8f\n"
+      "subs x22, x22, #0x1\n"
+      ".inst 0xa1404747  // ld1w { z7.s, z15.s }, pn9.b/Z, [x26]\n"
+      ".inst 0xa140469f  // ldnt1w { z23.s, z31.s }, pn9.b/Z, [x20]\n"
+      ".inst 0xa0414748  // ld1w { z8.s-z9.s }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa0414683  // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x20, #0x2, MUL VL]\n"
+      ".inst 0xa1424752  // ld1w { z18.s, z26.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa0424691  // ldnt1w { z16.s-z17.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
+      ".inst 0xa1434756  // ld1w { z22.s, z30.s }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa143468c  // ldnt1w { z4.s, z12.s }, pn9.b/Z, [x20, #0x6, MUL VL]\n"
+      "addvl x20, x20, #8\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0x809700e0  // fmopa za0.s, p0/M, p0/M, z7.s, z23.s\n"
+      "subs x22, x22, #0x1\n"
+      ".inst 0x809f00e1  // fmopa za1.s, p0/M, p0/M, z7.s, z31.s\n"
+      ".inst 0x809701e2  // fmopa za2.s, p0/M, p0/M, z15.s, z23.s\n"
+      ".inst 0x809f01e3  // fmopa za3.s, p0/M, p0/M, z15.s, z31.s\n"
+      ".inst 0xa1404747  // ld1w { z7.s, z15.s }, pn9.b/Z, [x26]\n"
+      ".inst 0x80820100  // fmopa za0.s, p0/M, p0/M, z8.s, z2.s\n"
+      ".inst 0xa140469f  // ldnt1w { z23.s, z31.s }, pn9.b/Z, [x20]\n"
+      ".inst 0x80830101  // fmopa za1.s, p0/M, p0/M, z8.s, z3.s\n"
+      ".inst 0x80820122  // fmopa za2.s, p0/M, p0/M, z9.s, z2.s\n"
+      ".inst 0x80830123  // fmopa za3.s, p0/M, p0/M, z9.s, z3.s\n"
+      ".inst 0xa0414748  // ld1w { z8.s-z9.s }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0x80900240  // fmopa za0.s, p0/M, p0/M, z18.s, z16.s\n"
+      ".inst 0xa0414683  // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x20, #0x2, MUL VL]\n"
+      ".inst 0x80910241  // fmopa za1.s, p0/M, p0/M, z18.s, z17.s\n"
+      ".inst 0x80900342  // fmopa za2.s, p0/M, p0/M, z26.s, z16.s\n"
+      ".inst 0x80910343  // fmopa za3.s, p0/M, p0/M, z26.s, z17.s\n"
+      ".inst 0xa1424752  // ld1w { z18.s, z26.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa0424691  // ldnt1w { z16.s-z17.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
+      ".inst 0x808402c0  // fmopa za0.s, p0/M, p0/M, z22.s, z4.s\n"
+      ".inst 0x808c02c1  // fmopa za1.s, p0/M, p0/M, z22.s, z12.s\n"
+      ".inst 0x808403c2  // fmopa za2.s, p0/M, p0/M, z30.s, z4.s\n"
+      ".inst 0x808c03c3  // fmopa za3.s, p0/M, p0/M, z30.s, z12.s\n"
+      ".inst 0xa1434756  // ld1w { z22.s, z30.s }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa143468c  // ldnt1w { z4.s, z12.s }, pn9.b/Z, [x20, #0x6, MUL VL]\n"
+      "addvl x20, x20, #8\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0x809700e0  // fmopa za0.s, p0/M, p0/M, z7.s, z23.s\n"
+      ".inst 0x809f00e1  // fmopa za1.s, p0/M, p0/M, z7.s, z31.s\n"
+      ".inst 0x809701e2  // fmopa za2.s, p0/M, p0/M, z15.s, z23.s\n"
+      ".inst 0x809f01e3  // fmopa za3.s, p0/M, p0/M, z15.s, z31.s\n"
+      ".inst 0x80820100  // fmopa za0.s, p0/M, p0/M, z8.s, z2.s\n"
+      ".inst 0x80830101  // fmopa za1.s, p0/M, p0/M, z8.s, z3.s\n"
+      ".inst 0x80820122  // fmopa za2.s, p0/M, p0/M, z9.s, z2.s\n"
+      ".inst 0x80830123  // fmopa za3.s, p0/M, p0/M, z9.s, z3.s\n"
+      ".inst 0x80900240  // fmopa za0.s, p0/M, p0/M, z18.s, z16.s\n"
+      ".inst 0x80910241  // fmopa za1.s, p0/M, p0/M, z18.s, z17.s\n"
+      ".inst 0x80900342  // fmopa za2.s, p0/M, p0/M, z26.s, z16.s\n"
+      ".inst 0x80910343  // fmopa za3.s, p0/M, p0/M, z26.s, z17.s\n"
+      ".inst 0x808402c0  // fmopa za0.s, p0/M, p0/M, z22.s, z4.s\n"
+      ".inst 0x808c02c1  // fmopa za1.s, p0/M, p0/M, z22.s, z12.s\n"
+      ".inst 0x808403c2  // fmopa za2.s, p0/M, p0/M, z30.s, z4.s\n"
+      ".inst 0x808c03c3  // fmopa za3.s, p0/M, p0/M, z30.s, z12.s\n"
+      "8:"  // K oddments
+      "cbz x21, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa1404747  // ld1w { z7.s, z15.s }, pn9.b/Z, [x26]\n"
+      "subs x21, x21, #0x1\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xa1404697  // ld1w { z23.s, z31.s }, pn9.b/Z, [x20]\n"
+      "addvl x20, x20, #2\n"
+      ".inst 0x809700e0  // fmopa za0.s, p0/M, p0/M, z7.s, z23.s\n"
+      ".inst 0x809f00e1  // fmopa za1.s, p0/M, p0/M, z7.s, z31.s\n"
+      ".inst 0x809701e2  // fmopa za2.s, p0/M, p0/M, z15.s, z23.s\n"
+      ".inst 0x809f01e3  // fmopa za3.s, p0/M, p0/M, z15.s, z31.s\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840401  // mova za1h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc0860468  // mova { z8.s-z11.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840702  // mova za2h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa043c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840703  // mova za3h.s[x12], { z24.s-z27.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c5b4  // st1w { z20.s-z23.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 30f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      ".inst 0xa060c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xa061c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c5a4  // st1w { z4.s-z7.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5a0  // st1w { z0.s-z3.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 30f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9, LSL #2\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "tbz x15, #2, 21f\n"
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Skip activation: Accumulator row 0 loop
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604327  // st1w { z7.s, z15.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Skip activation: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "17:"  // Store to output array: Skip activation: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 21f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 19f\n"
+      "18:"  // Store to output array: Skip activation: Accumulator row 1 loop
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604327  // st1w { z7.s, z15.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 18b\n"
+      "19:"  // Store to output array: Skip activation: Accumulator row 1 oddments
+      "cbz x19, 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc086047c  // mova { z28.s-z31.s }, za3h.s[x12]\n"
+      ".inst 0xa1604334  // st1w { z20.s, z28.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604335  // st1w { z21.s, z29.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      ".inst 0xa1604336  // st1w { z22.s, z30.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "20:"  // Store to output array: Skip activation: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 21f\n"
+      "b 28f\n"
+      "21:"  // Store to output array: Skip activation: End
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "ld1rw { z21.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "ld1rw { z20.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 23f\n"
+      "22:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xc1b4caa4  // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4caac  // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604327  // st1w { z7.s, z15.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 22b\n"
+      "23:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 24f\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xc1b4caa0  // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4caa8  // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604320  // st1w { z0.s, z8.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604321  // st1w { z1.s, z9.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      ".inst 0xa1604322  // st1w { z2.s, z10.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "24:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 26f\n"
+      "25:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xc1b4cab0  // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4cab8  // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
+      ".inst 0xa1604330  // st1w { z16.s, z24.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604331  // st1w { z17.s, z25.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604332  // st1w { z18.s, z26.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      ".inst 0xa1604333  // st1w { z19.s, z27.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 25b\n"
+      "26:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 27f\n"
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xc1b4cab0  // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4cab8  // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604330  // st1w { z16.s, z24.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604331  // st1w { z17.s, z25.s }, p8, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      ".inst 0xa1604332  // st1w { z18.s, z26.s }, p8, [x25]\n"
+      "27:"  // Store to output array: Accumulator row 1 oddments: End
+      "28:"  // Store to output array: End
+      "tbz x15, #0, 30f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "29:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c5c8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840503  // mova za3h.s[x12], { z8.s-z11.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 29b\n"
+      "30:"  // End block
+      "incw x9, ALL, MUL #2\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #2\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
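
A note on the bias handling seen in both kernels above: rather than adding the bias after the matrix multiply, the code zeroes ZA, sets a register to 1.0 ("fmov z21.s, #1.0") and issues one FMOPA per column block, so the bias enters the accumulators as a rank-1 update, ones * bias^T. A scalar model of that update, purely illustrative:

// Illustrative sketch (not part of the patch).
#include <array>
#include <cstdio>

int main()
{
    constexpr int rows = 4, cols = 4;
    std::array<float, cols> bias{0.5f, -1.0f, 2.0f, 0.0f};
    float acc[rows][cols] = {};  // ZA tile starts zeroed ("zero { zad0, ... }")

    // acc += ones(rows) * bias^T: every row of the tile now holds the bias.
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            acc[r][c] += 1.0f * bias[c];

    std::printf("acc[2][1] = %f\n", acc[2][1]);  // prints -1.0
    return 0;
}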
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000..a315ebb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL
+{
+public:
+  typedef float operand_type;
+  typedef float result_type;
+
+  typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
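+  // The kernel accumulates a 4 VL x 1 VL tile of C in the SME ZA array via FMOPA outer products;
+  // out_height()/out_width() report that tile size in elements for the current vector length.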
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<float>() * 4;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<float>() * 1;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 1;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return true;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_fp32_mopa_4VLx1VL;
+
+  StdTransformsSME<operand_type, result_type, 4, 1, 1> transforms = {};
+
+  cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000..5252e81
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const float *const A,
+      const float *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(K * sizeof(float)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        n_loops((K - 1) / 2), n_tail_iters((K - 1) % 2),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const float *const A;
+    const float *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840480  // mova za0h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa041c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa042c1c0  // ld1w { z0.s-z3.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      "whilelt p0.s, x9, x28\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "fmov z8.s, #1.0\n"
+      "ldnt1w { z27.s }, p0/Z, [x19, x9, LSL #2]\n"
+      ".inst 0x809b2500  // fmopa za0.s, p1/M, p1/M, z8.s, z27.s\n"
+      ".inst 0x809b2501  // fmopa za1.s, p1/M, p1/M, z8.s, z27.s\n"
+      ".inst 0x809b2502  // fmopa za2.s, p1/M, p1/M, z8.s, z27.s\n"
+      ".inst 0x809b2503  // fmopa za3.s, p1/M, p1/M, z8.s, z27.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19\n"
+      "incw x20, ALL, MUL #4\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "lsr x22, x19, #0x2\n"
+      "and x21, x19, #0x3\n"
+      "ldr x20, [%x[args], %[offsetof_B]]\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x20, x9, x19, x20\n"  // bptr = B + n * kstride_bytes
+      "cbz x22, 8f\n"
+      "subs x22, x22, #0x1\n"
+      ".inst 0xa040c344  // ld1w { z4.s-z7.s }, pn8.b/Z, [x26]\n"
+      "ldnt1w { z29.s }, p1/Z, [x20]\n"
+      ".inst 0xa041c34c  // ld1w { z12.s-z15.s }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      "ldnt1w { z23.s }, p1/Z, [x20, #1, MUL VL]\n"
+      ".inst 0xa042c340  // ld1w { z0.s-z3.s }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1w { z21.s }, p1/Z, [x20, #2, MUL VL]\n"
+      ".inst 0xa143c352  // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
+      "addvl x20, x20, #4\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0x809d2480  // fmopa za0.s, p1/M, p1/M, z4.s, z29.s\n"
+      "subs x22, x22, #0x1\n"
+      ".inst 0x809d24a1  // fmopa za1.s, p1/M, p1/M, z5.s, z29.s\n"
+      ".inst 0x809d24c2  // fmopa za2.s, p1/M, p1/M, z6.s, z29.s\n"
+      ".inst 0x809d24e3  // fmopa za3.s, p1/M, p1/M, z7.s, z29.s\n"
+      ".inst 0xa040c344  // ld1w { z4.s-z7.s }, pn8.b/Z, [x26]\n"
+      ".inst 0x80972580  // fmopa za0.s, p1/M, p1/M, z12.s, z23.s\n"
+      "ldnt1w { z29.s }, p1/Z, [x20]\n"
+      ".inst 0x809725a1  // fmopa za1.s, p1/M, p1/M, z13.s, z23.s\n"
+      ".inst 0x809725c2  // fmopa za2.s, p1/M, p1/M, z14.s, z23.s\n"
+      ".inst 0x809725e3  // fmopa za3.s, p1/M, p1/M, z15.s, z23.s\n"
+      ".inst 0xa041c34c  // ld1w { z12.s-z15.s }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0x80952400  // fmopa za0.s, p1/M, p1/M, z0.s, z21.s\n"
+      "ldnt1w { z23.s }, p1/Z, [x20, #1, MUL VL]\n"
+      ".inst 0x80952421  // fmopa za1.s, p1/M, p1/M, z1.s, z21.s\n"
+      ".inst 0x80952442  // fmopa za2.s, p1/M, p1/M, z2.s, z21.s\n"
+      ".inst 0x80952463  // fmopa za3.s, p1/M, p1/M, z3.s, z21.s\n"
+      ".inst 0xa042c340  // ld1w { z0.s-z3.s }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1w { z21.s }, p1/Z, [x20, #2, MUL VL]\n"
+      ".inst 0x809b2640  // fmopa za0.s, p1/M, p1/M, z18.s, z27.s\n"
+      ".inst 0x809b26c1  // fmopa za1.s, p1/M, p1/M, z22.s, z27.s\n"
+      ".inst 0x809b2742  // fmopa za2.s, p1/M, p1/M, z26.s, z27.s\n"
+      ".inst 0x809b27c3  // fmopa za3.s, p1/M, p1/M, z30.s, z27.s\n"
+      ".inst 0xa143c352  // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
+      "addvl x20, x20, #4\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0x809d2480  // fmopa za0.s, p1/M, p1/M, z4.s, z29.s\n"
+      ".inst 0x809d24a1  // fmopa za1.s, p1/M, p1/M, z5.s, z29.s\n"
+      ".inst 0x809d24c2  // fmopa za2.s, p1/M, p1/M, z6.s, z29.s\n"
+      ".inst 0x809d24e3  // fmopa za3.s, p1/M, p1/M, z7.s, z29.s\n"
+      ".inst 0x80972580  // fmopa za0.s, p1/M, p1/M, z12.s, z23.s\n"
+      ".inst 0x809725a1  // fmopa za1.s, p1/M, p1/M, z13.s, z23.s\n"
+      ".inst 0x809725c2  // fmopa za2.s, p1/M, p1/M, z14.s, z23.s\n"
+      ".inst 0x809725e3  // fmopa za3.s, p1/M, p1/M, z15.s, z23.s\n"
+      ".inst 0x80952400  // fmopa za0.s, p1/M, p1/M, z0.s, z21.s\n"
+      ".inst 0x80952421  // fmopa za1.s, p1/M, p1/M, z1.s, z21.s\n"
+      ".inst 0x80952442  // fmopa za2.s, p1/M, p1/M, z2.s, z21.s\n"
+      ".inst 0x80952463  // fmopa za3.s, p1/M, p1/M, z3.s, z21.s\n"
+      ".inst 0x809b2640  // fmopa za0.s, p1/M, p1/M, z18.s, z27.s\n"
+      ".inst 0x809b26c1  // fmopa za1.s, p1/M, p1/M, z22.s, z27.s\n"
+      ".inst 0x809b2742  // fmopa za2.s, p1/M, p1/M, z26.s, z27.s\n"
+      ".inst 0x809b27c3  // fmopa za3.s, p1/M, p1/M, z30.s, z27.s\n"
+      "8:"  // K oddments
+      "cbz x21, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa040c344  // ld1w { z4.s-z7.s }, pn8.b/Z, [x26]\n"
+      "subs x21, x21, #0x1\n"
+      "addvl x26, x26, #4\n"
+      "ld1w { z29.s }, p1/Z, [x20]\n"
+      "addvl x20, x20, #1\n"
+      ".inst 0x809d2480  // fmopa za0.s, p1/M, p1/M, z4.s, z29.s\n"
+      ".inst 0x809d24a1  // fmopa za1.s, p1/M, p1/M, z5.s, z29.s\n"
+      ".inst 0x809d24c2  // fmopa za2.s, p1/M, p1/M, z6.s, z29.s\n"
+      ".inst 0x809d24e3  // fmopa za3.s, p1/M, p1/M, z7.s, z29.s\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c1c8  // ld1w { z8.s-z11.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0860418  // mova { z24.s-z27.s }, za0h.s[x12]\n"
+      ".inst 0xc0840500  // mova za0h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xa041c1cc  // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xa042c1c8  // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840502  // mova za2h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xa043c1dc  // ld1w { z28.s-z31.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840783  // mova za3h.s[x12], { z28.s-z31.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c1b8  // st1w { z24.s-z27.s }, pn8.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c1a4  // st1w { z4.s-z7.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1a0  // st1w { z0.s-z3.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 42f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc0860438  // mova { z24.s-z27.s }, za1h.s[x12]\n"
+      ".inst 0xa060c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13]\n"
+      ".inst 0xc0860440  // mova { z0.s-z3.s }, za2h.s[x12]\n"
+      ".inst 0xc0860468  // mova { z8.s-z11.s }, za3h.s[x12]\n"
+      ".inst 0xa061c1b8  // st1w { z24.s-z27.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c1a0  // st1w { z0.s-z3.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1a8  // st1w { z8.s-z11.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 42f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9, LSL #2\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "tbz x15, #2, 27f\n"
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Skip activation: Accumulator row 0 loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Skip activation: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      "st1w { z4.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z5.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 17f\n"
+      "st1w { z6.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "17:"  // Store to output array: Skip activation: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 19f\n"
+      "18:"  // Store to output array: Skip activation: Accumulator row 1 loop
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 18b\n"
+      "19:"  // Store to output array: Skip activation: Accumulator row 1 oddments
+      "cbz x19, 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      "st1w { z4.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z5.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 20f\n"
+      "st1w { z6.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "20:"  // Store to output array: Skip activation: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 22f\n"
+      "21:"  // Store to output array: Skip activation: Accumulator row 2 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 21b\n"
+      "22:"  // Store to output array: Skip activation: Accumulator row 2 oddments
+      "cbz x19, 23f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      "st1w { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 23f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 23f\n"
+      "st1w { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "23:"  // Store to output array: Skip activation: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 25f\n"
+      "24:"  // Store to output array: Skip activation: Accumulator row 3 loop
+      ".inst 0xc0860464  // mova { z4.s-z7.s }, za3h.s[x12]\n"
+      "st1w { z4.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z5.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z6.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z7.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 24b\n"
+      "25:"  // Store to output array: Skip activation: Accumulator row 3 oddments
+      "cbz x19, 26f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      "st1w { z12.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 26f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z13.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 26f\n"
+      "st1w { z14.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "26:"  // Store to output array: Skip activation: Accumulator row 3 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "b 40f\n"
+      "27:"  // Store to output array: Skip activation: End
+      "cntw x22\n"
+      "cmp x24, x22\n"
+      "ld1rw { z25.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "ld1rw { z24.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 29f\n"
+      "28:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860414  // mova { z20.s-z23.s }, za0h.s[x12]\n"
+      ".inst 0xc1b8cb34  // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
+      "st1w { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z23.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 28b\n"
+      "29:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 30f\n"
+      ".inst 0xc0860408  // mova { z8.s-z11.s }, za0h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb28  // fclamp { z8.s-z11.s }, z25.s, z24.s\n"
+      "st1w { z8.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 30f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z9.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 30f\n"
+      "st1w { z10.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "30:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 40f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 32f\n"
+      "31:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 31b\n"
+      "32:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 33f\n"
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 33f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 33f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "33:"  // Store to output array: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 40f\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 35f\n"
+      "34:"  // Store to output array: Accumulator row 2 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 34b\n"
+      "35:"  // Store to output array: Accumulator row 2 oddments
+      "cbz x19, 36f\n"
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 36f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 36f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "36:"  // Store to output array: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 40f\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 38f\n"
+      "37:"  // Store to output array: Accumulator row 3 loop
+      ".inst 0xc0860474  // mova { z20.s-z23.s }, za3h.s[x12]\n"
+      ".inst 0xc1b8cb34  // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
+      "st1w { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1w { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z23.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 37b\n"
+      "38:"  // Store to output array: Accumulator row 3 oddments
+      "cbz x19, 39f\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1b8cb30  // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 39f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 39f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "39:"  // Store to output array: Accumulator row 3 oddments: End
+      "40:"  // Store to output array: End
+      "tbz x15, #0, 42f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "41:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840682  // mova za2h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 41b\n"
+      "42:"  // End block
+      "incw x9\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #4\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000..b8bcd53
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8q_mopa_1VLx4VL
+{
+public:
+  typedef int8_t operand_type;
+  typedef int8_t result_type;
+
+  typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<int32_t>() * 1;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<int32_t>() * 4;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return false;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_s8q_mopa_1VLx4VL;
+
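+  // Note: the trailing 'true' presumably asks the interleave transform to append per-row sums,
+  // which the quantised kernel consumes (ADDVA) when applying the requantisation offsets.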
+  StdTransformsSME<operand_type, result_type, 1, 4, 4, true> transforms = {};
+
+  cls_sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000..62170c4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const int8_t *const A,
+      const int8_t *const B,
+      int8_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+      const Requantize32 &rq,
+      const int n_0,
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
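+        // Stride between B panels: K rounded up to the k_unroll of 4 before converting to bytes.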
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+        C(C), ldcb(ldc * sizeof(int8_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias), n_0(n_0),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (rq.per_channel_requant)
+      {
+        flags |= 1 << 2;  // PER_CHANNEL_QUANTISATION
+      }
+    }
+
+    const int8_t *const A;
+    const int8_t *const B;
+    const long kstride_bytes;
+    int8_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    int32_t min = std::numeric_limits<int8_t>::min();
+    int32_t max = std::numeric_limits<int8_t>::max();
+
+    const int32_t *const bias;
+    const int n_0;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x13, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x13, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c56c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x11]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa041c57c  // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+      ".inst 0xc0840781  // mova za1h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa042c57c  // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+      ".inst 0xc0840782  // mova za2h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa043c564  // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x11, x11, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w9, [%x[args], %[offsetof_M]]\n"
+      "mov x28, #0x0\n"
+      "mov x27, #0x0\n"
+      "ldr w26, [%x[args], %[offsetof_N]]\n"
+      "ldr x25, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x24, x25\n"
+      ".inst 0x25ba6770  // whilelt pn8.s, x27, x26, VLx4\n"
+      "tbnz x13, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      ".inst 0xa01bc279  // ldnt1w { z24.s-z27.s }, p8/Z, [x19, x27, LSL #2]\n"
+      ".inst 0xc0902700  // addha za0.s, p1/M, p1/M, z24.s\n"
+      ".inst 0xc0902721  // addha za1.s, p1/M, p1/M, z25.s\n"
+      ".inst 0xc0902742  // addha za2.s, p1/M, p1/M, z26.s\n"
+      ".inst 0xc0902763  // addha za3.s, p1/M, p1/M, z27.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x27\n"
+      "mov x20, x28\n"
+      "incw x19, ALL, MUL #4\n"
+      "incw x20\n"
+      "cmp x19, x26\n"
+      "csel x20, x28, x20, LT\n"
+      "mov x19, x13\n"
+      "bfm x13, XZR, #0x0, #0x0  // bfc x13, #0x0, #0x1\n"
+      "cmp x20, x9\n"
+      "csel x13, x19, x13, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x27, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      "ld1b { z10.b }, p1/Z, [x24]\n"
+      ".inst 0xa04086dd  // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+      "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
+      ".inst 0xa04186cd  // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0xa04286d9  // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0xa04386c1  // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa09c2540  // smopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa09d2541  // smopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
+      ".inst 0xa09e2542  // smopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
+      ".inst 0xa09f2543  // smopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
+      "ld1b { z10.b }, p1/Z, [x24]\n"
+      ".inst 0xa08c2600  // smopa za0.s, p1/M, p1/M, z16.b, z12.b\n"
+      ".inst 0xa04086dd  // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa08d2601  // smopa za1.s, p1/M, p1/M, z16.b, z13.b\n"
+      ".inst 0xa08e2602  // smopa za2.s, p1/M, p1/M, z16.b, z14.b\n"
+      ".inst 0xa08f2603  // smopa za3.s, p1/M, p1/M, z16.b, z15.b\n"
+      "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
+      ".inst 0xa09826a0  // smopa za0.s, p1/M, p1/M, z21.b, z24.b\n"
+      ".inst 0xa04186cd  // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa09926a1  // smopa za1.s, p1/M, p1/M, z21.b, z25.b\n"
+      ".inst 0xa09a26a2  // smopa za2.s, p1/M, p1/M, z21.b, z26.b\n"
+      ".inst 0xa09b26a3  // smopa za3.s, p1/M, p1/M, z21.b, z27.b\n"
+      "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0xa04286d9  // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      ".inst 0xa0802660  // smopa za0.s, p1/M, p1/M, z19.b, z0.b\n"
+      ".inst 0xa0812661  // smopa za1.s, p1/M, p1/M, z19.b, z1.b\n"
+      ".inst 0xa0822662  // smopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
+      ".inst 0xa0832663  // smopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
+      "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0xa04386c1  // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa09c2540  // smopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
+      ".inst 0xa09d2541  // smopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
+      ".inst 0xa09e2542  // smopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
+      ".inst 0xa09f2543  // smopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
+      ".inst 0xa08c2600  // smopa za0.s, p1/M, p1/M, z16.b, z12.b\n"
+      ".inst 0xa08d2601  // smopa za1.s, p1/M, p1/M, z16.b, z13.b\n"
+      ".inst 0xa08e2602  // smopa za2.s, p1/M, p1/M, z16.b, z14.b\n"
+      ".inst 0xa08f2603  // smopa za3.s, p1/M, p1/M, z16.b, z15.b\n"
+      ".inst 0xa09826a0  // smopa za0.s, p1/M, p1/M, z21.b, z24.b\n"
+      ".inst 0xa09926a1  // smopa za1.s, p1/M, p1/M, z21.b, z25.b\n"
+      ".inst 0xa09a26a2  // smopa za2.s, p1/M, p1/M, z21.b, z26.b\n"
+      ".inst 0xa09b26a3  // smopa za3.s, p1/M, p1/M, z21.b, z27.b\n"
+      ".inst 0xa0802660  // smopa za0.s, p1/M, p1/M, z19.b, z0.b\n"
+      ".inst 0xa0812661  // smopa za1.s, p1/M, p1/M, z19.b, z1.b\n"
+      ".inst 0xa0822662  // smopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
+      ".inst 0xa0832663  // smopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      "ld1b { z10.b }, p1/Z, [x24]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x24, x24, #1\n"
+      ".inst 0xa04086dc  // ld1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #4\n"
+      ".inst 0xa09c2540  // smopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
+      ".inst 0xa09d2541  // smopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
+      ".inst 0xa09e2542  // smopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
+      ".inst 0xa09f2543  // smopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "ld1w { z14.s }, p1/Z, [x24]\n"
+      "addvl x24, x24, #1\n"
+      ".inst 0xc09125c0  // addva za0.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c1  // addva za1.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c2  // addva za2.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c3  // addva za3.s, p1/M, p1/M, z14.s\n"
+      "tbz x13, #1, 14f\n"
+      "tbz x13, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c578  // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      ".inst 0xc0840700  // mova za0h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xa041c570  // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xc0860458  // mova { z24.s-z27.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa042c564  // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c564  // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c55c  // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
+      "addvl x11, x11, #16\n"
+      ".inst 0xa061c548  // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+      ".inst 0xa062c558  // st1w { z24.s-z27.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+      ".inst 0xa063c54c  // st1w { z12.s-z15.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+      "addvl x10, x10, #16\n"
+      "blt 11b\n"
+      "b 21f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      ".inst 0xc0860420  // mova { z0.s-z3.s }, za1h.s[x12]\n"
+      ".inst 0xa060c55c  // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa061c540  // st1w { z0.s-z3.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c548  // st1w { z8.s-z11.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+      ".inst 0xa063c550  // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+      "addvl x10, x10, #16\n"
+      "blt 13b\n"
+      "b 21f\n"
+      "14:"  // Store to output array
+      "ldr x23, [%x[args], %[offsetof_C]]\n"
+      "add x23, x23, x27\n"  // C += n
+      "sub x22, x9, x28\n"
+      "ld1rw { z12.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ldr x21, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x23, x28, x21, x23\n"  // C += m * ldc
+      "ld1rw { z13.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+      "ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+      "ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+      "tbz x13, #2, 15f\n"
+      "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+      "add x20, x20, x27\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa040c26c  // ld1w { z12.s-z15.s }, p8/Z, [x19]\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa040c264  // ld1w { z4.s-z7.s }, p8/Z, [x19]\n"
+      "15:"  // Store to output array: Load per-channel parameters: End
+      "cntw x19\n"
+      "whilelt p0.b, x27, x26\n"
+      "cmp x22, x19\n"
+      "csel x19, x22, x19, LT\n"
+      "lsr x20, x19, #0x1\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x1\n"
+      "cbz x20, 17f\n"
+      "16:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086001a  // mova { z26.s-z27.s }, za0h.s[x12, 0:1]\n"
+      ".inst 0xc086005c  // mova { z28.s-z29.s }, za1h.s[x12, 0:1]\n"
+      ".inst 0xc1aca41a  // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z12.s\n"
+      ".inst 0xc0860096  // mova { z22.s-z23.s }, za2h.s[x12, 0:1]\n"
+      ".inst 0xc08600d0  // mova { z16.s-z17.s }, za3h.s[x12, 0:1]\n"
+      ".inst 0xc1ada41c  // sqdmulh { z28.s-z29.s }, { z28.s-z29.s }, z13.s\n"
+      ".inst 0xc1aea416  // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z14.s\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x20, LSL #1\n"
+      ".inst 0xc1afa410  // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z15.s\n"
+      ".inst 0xc1a4a23a  // srshl { z26.s-z27.s }, { z26.s-z27.s }, z4.s\n"
+      ".inst 0xc1a5a23c  // srshl { z28.s-z29.s }, { z28.s-z29.s }, z5.s\n"
+      ".inst 0xc1a6a236  // srshl { z22.s-z23.s }, { z22.s-z23.s }, z6.s\n"
+      ".inst 0xc1a7a230  // srshl { z16.s-z17.s }, { z16.s-z17.s }, z7.s\n"
+      ".inst 0xc1a1a31a  // add { z26.s-z27.s }, { z26.s-z27.s }, z1.s\n"
+      ".inst 0xc1a1a31c  // add { z28.s-z29.s }, { z28.s-z29.s }, z1.s\n"
+      ".inst 0xc1a1a316  // add { z22.s-z23.s }, { z22.s-z23.s }, z1.s\n"
+      ".inst 0xc1a1a310  // add { z16.s-z17.s }, { z16.s-z17.s }, z1.s\n"
+      ".inst 0xc1b4c6ba  // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6bc  // sclamp { z28.s-z29.s }, z21.s, z20.s\n"
+      "uzp1 z19.b, z26.b, z28.b\n"
+      ".inst 0xc1b4c6b6  // sclamp { z22.s-z23.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6b0  // sclamp { z16.s-z17.s }, z21.s, z20.s\n"
+      "uzp1 z16.b, z22.b, z16.b\n"
+      "uzp1 z18.b, z27.b, z29.b\n"
+      "uzp1 z17.b, z23.b, z17.b\n"
+      "uzp1 z16.b, z19.b, z16.b\n"
+      "st1b { z16.b }, p0, [x23]\n"
+      "add x23, x23, x21\n"
+      "uzp1 z16.b, z18.b, z17.b\n"
+      "st1b { z16.b }, p0, [x23]\n"
+      "add x23, x23, x21\n"
+      "blt 16b\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 18f\n"
+      ".inst 0xc0860002  // mova { z2.s-z3.s }, za0h.s[x12, 0:1]\n"
+      ".inst 0xc0860058  // mova { z24.s-z25.s }, za1h.s[x12, 0:1]\n"
+      ".inst 0xc1aca402  // sqdmulh { z2.s-z3.s }, { z2.s-z3.s }, z12.s\n"
+      ".inst 0xc0860090  // mova { z16.s-z17.s }, za2h.s[x12, 0:1]\n"
+      ".inst 0xc08600ca  // mova { z10.s-z11.s }, za3h.s[x12, 0:1]\n"
+      ".inst 0xc1ada418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z13.s\n"
+      ".inst 0xc1aea410  // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z14.s\n"
+      ".inst 0xc1afa40a  // sqdmulh { z10.s-z11.s }, { z10.s-z11.s }, z15.s\n"
+      ".inst 0xc1a4a222  // srshl { z2.s-z3.s }, { z2.s-z3.s }, z4.s\n"
+      ".inst 0xc1a5a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z5.s\n"
+      ".inst 0xc1a6a230  // srshl { z16.s-z17.s }, { z16.s-z17.s }, z6.s\n"
+      ".inst 0xc1a7a22a  // srshl { z10.s-z11.s }, { z10.s-z11.s }, z7.s\n"
+      ".inst 0xc1a1a302  // add { z2.s-z3.s }, { z2.s-z3.s }, z1.s\n"
+      ".inst 0xc1a1a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+      ".inst 0xc1a1a310  // add { z16.s-z17.s }, { z16.s-z17.s }, z1.s\n"
+      ".inst 0xc1a1a30a  // add { z10.s-z11.s }, { z10.s-z11.s }, z1.s\n"
+      ".inst 0xc1b4c6a2  // sclamp { z2.s-z3.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6b8  // sclamp { z24.s-z25.s }, z21.s, z20.s\n"
+      "uzp1 z23.b, z2.b, z24.b\n"
+      ".inst 0xc1b4c6b0  // sclamp { z16.s-z17.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6aa  // sclamp { z10.s-z11.s }, z21.s, z20.s\n"
+      "uzp1 z16.b, z16.b, z10.b\n"
+      "uzp1 z16.b, z23.b, z16.b\n"
+      "st1b { z16.b }, p0, [x23]\n"
+      "18:"  // Store to output array: Accumulator row 0 oddments: End
+      "19:"  // Store to output array: End
+      "tbz x13, #0, 21f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "20:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c570  // ld1w { z16.s-z19.s }, pn9.b/Z, [x11]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c56c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c570  // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c56c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x11, x11, #16\n"
+      "blt 20b\n"
+      "21:"  // End block
+      "incw x27, ALL, MUL #4\n"
+      "cmp x27, x26\n"
+      "blt 3b\n"
+      "incw x28\n"
+      "cmp x28, x9\n"
+      "mov x27, #0x0\n"
+      "mov x25, x24\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000..954b0da
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8q_mopa_2VLx2VL
+{
+public:
+  typedef int8_t operand_type;
+  typedef int8_t result_type;
+
+  typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<int32_t>() * 2;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<int32_t>() * 2;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return false;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_s8q_mopa_2VLx2VL;
+
+  StdTransformsSME<operand_type, result_type, 2, 2, 4, true> transforms = {};
+
+  cls_sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000..e565699
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const int8_t *const A,
+      const int8_t *const B,
+      int8_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+      const Requantize32 &rq,
+      const int n_0,
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+        C(C), ldcb(ldc * sizeof(int8_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias), n_0(n_0),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (rq.per_channel_requant)
+      {
+        flags |= 1 << 2;  // PER_CHANNEL_QUANTISATION
+      }
+    }
+
+    const int8_t *const A;
+    const int8_t *const B;
+    const long kstride_bytes;
+    int8_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    int32_t min = std::numeric_limits<int8_t>::min();
+    int32_t max = std::numeric_limits<int8_t>::max();
+
+    const int32_t *const bias;
+    const int n_0;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840400  // mova za0h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa041c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840783  // mova za3h.s[x12], { z28.s-z31.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      ".inst 0x25bc4530  // whilelt pn8.s, x9, x28, VLx2\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      ".inst 0xa0094275  // ldnt1w { z20.s-z21.s }, p8/Z, [x19, x9, LSL #2]\n"
+      ".inst 0xc0902680  // addha za0.s, p1/M, p1/M, z20.s\n"
+      ".inst 0xc09026a1  // addha za1.s, p1/M, p1/M, z21.s\n"
+      ".inst 0xc0902682  // addha za2.s, p1/M, p1/M, z20.s\n"
+      ".inst 0xc09026a3  // addha za3.s, p1/M, p1/M, z21.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19, ALL, MUL #2\n"
+      "incw x20, ALL, MUL #2\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa040075e  // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+      ".inst 0xa04006d1  // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa041074e  // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa04106c9  // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa0420740  // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa14206dc  // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa0430744  // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14306ca  // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa09027c0  // smopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa09127c1  // smopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
+      ".inst 0xa09027e2  // smopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
+      ".inst 0xa09127e3  // smopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
+      ".inst 0xa040075e  // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+      ".inst 0xa08825c0  // smopa za0.s, p1/M, p1/M, z14.b, z8.b\n"
+      ".inst 0xa04006d1  // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa08925c1  // smopa za1.s, p1/M, p1/M, z14.b, z9.b\n"
+      ".inst 0xa08825e2  // smopa za2.s, p1/M, p1/M, z15.b, z8.b\n"
+      ".inst 0xa08925e3  // smopa za3.s, p1/M, p1/M, z15.b, z9.b\n"
+      ".inst 0xa041074e  // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa0942400  // smopa za0.s, p1/M, p1/M, z0.b, z20.b\n"
+      ".inst 0xa04106c9  // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa09c2401  // smopa za1.s, p1/M, p1/M, z0.b, z28.b\n"
+      ".inst 0xa0942422  // smopa za2.s, p1/M, p1/M, z1.b, z20.b\n"
+      ".inst 0xa09c2423  // smopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+      ".inst 0xa0420740  // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa14206dc  // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa0822480  // smopa za0.s, p1/M, p1/M, z4.b, z2.b\n"
+      ".inst 0xa08a2481  // smopa za1.s, p1/M, p1/M, z4.b, z10.b\n"
+      ".inst 0xa08224a2  // smopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
+      ".inst 0xa08a24a3  // smopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
+      ".inst 0xa0430744  // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14306ca  // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa09027c0  // smopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
+      ".inst 0xa09127c1  // smopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
+      ".inst 0xa09027e2  // smopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
+      ".inst 0xa09127e3  // smopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
+      ".inst 0xa08825c0  // smopa za0.s, p1/M, p1/M, z14.b, z8.b\n"
+      ".inst 0xa08925c1  // smopa za1.s, p1/M, p1/M, z14.b, z9.b\n"
+      ".inst 0xa08825e2  // smopa za2.s, p1/M, p1/M, z15.b, z8.b\n"
+      ".inst 0xa08925e3  // smopa za3.s, p1/M, p1/M, z15.b, z9.b\n"
+      ".inst 0xa0942400  // smopa za0.s, p1/M, p1/M, z0.b, z20.b\n"
+      ".inst 0xa09c2401  // smopa za1.s, p1/M, p1/M, z0.b, z28.b\n"
+      ".inst 0xa0942422  // smopa za2.s, p1/M, p1/M, z1.b, z20.b\n"
+      ".inst 0xa09c2423  // smopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+      ".inst 0xa0822480  // smopa za0.s, p1/M, p1/M, z4.b, z2.b\n"
+      ".inst 0xa08a2481  // smopa za1.s, p1/M, p1/M, z4.b, z10.b\n"
+      ".inst 0xa08224a2  // smopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
+      ".inst 0xa08a24a3  // smopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa040075e  // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xa04006d0  // ld1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #2\n"
+      ".inst 0xa09027c0  // smopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
+      ".inst 0xa09127c1  // smopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
+      ".inst 0xa09027e2  // smopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
+      ".inst 0xa09127e3  // smopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      ".inst 0xa040474e  // ld1w { z14.s-z15.s }, pn9.b/Z, [x26]\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xc09125c0  // addva za0.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c1  // addva za1.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125e2  // addva za2.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09125e3  // addva za3.s, p1/M, p1/M, z15.s\n"
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0860408  // mova { z8.s-z11.s }, za0h.s[x12]\n"
+      ".inst 0xc0840780  // mova za0h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xc0860434  // mova { z20.s-z23.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840702  // mova za2h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa043c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c5b4  // st1w { z20.s-z23.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c5bc  // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 24f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xa060c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa061c5a4  // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 24f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z11.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+      "ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+      "ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+      "tbz x15, #2, 15f\n"
+      "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+      "add x20, x20, x9\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa0404262  // ld1w { z2.s-z3.s }, p8/Z, [x19]\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa0404260  // ld1w { z0.s-z1.s }, p8/Z, [x19]\n"
+      "15:"  // Store to output array: Load per-channel parameters: End
+      "cntw x22\n"
+      "whilelt p0.h, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 17f\n"
+      "16:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc086043c  // mova { z28.s-z31.s }, za1h.s[x12]\n"
+      ".inst 0xc1a2ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
+      ".inst 0xc1a3ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a0aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+      ".inst 0xc1a1aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
+      ".inst 0xc1abab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
+      ".inst 0xc1abab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+      ".inst 0xc1b8cf2c  // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf3c  // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z12.h, z28.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z13.h, z29.h\n"
+      "uzp1 z17.h, z14.h, z30.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z15.h, z31.h\n"
+      "st1b { z17.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 16b\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 18f\n"
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xc1a2ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
+      ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a0aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
+      ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+      ".inst 0xc1abab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+      ".inst 0xc1abab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
+      ".inst 0xc1b8cf3c  // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf2c  // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z28.h, z12.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "subs x19, x19, #0x1\n"
+      "uzp1 z16.h, z29.h, z13.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "uzp1 z16.h, z30.h, z14.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "18:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 22f\n"
+      "whilelt p0.h, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 20f\n"
+      "19:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xc1a2ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
+      ".inst 0xc1a3ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a0aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+      ".inst 0xc1a1aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+      ".inst 0xc1abab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+      ".inst 0xc1abab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+      ".inst 0xc1b8cf24  // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf30  // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z4.h, z16.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z5.h, z17.h\n"
+      "uzp1 z17.h, z6.h, z18.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z7.h, z19.h\n"
+      "st1b { z17.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 19b\n"
+      "20:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 21f\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xc1a2ac14  // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
+      ".inst 0xc1a3ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a0aa34  // srshl { z20.s-z23.s }, { z20.s-z23.s }, z0.s\n"
+      ".inst 0xc1a1aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+      ".inst 0xc1abab14  // add { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+      ".inst 0xc1abab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+      ".inst 0xc1b8cf34  // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf30  // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z20.h, z16.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "subs x19, x19, #0x1\n"
+      "uzp1 z16.h, z21.h, z17.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "uzp1 z16.h, z22.h, z18.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "21:"  // Store to output array: Accumulator row 1 oddments: End
+      "22:"  // Store to output array: End
+      "tbz x15, #0, 24f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "23:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c5c4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 23b\n"
+      "24:"  // End block
+      "incw x9, ALL, MUL #2\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #2\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
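Note: the flags word built by KernelArgs above and kept in x15 by the assembly is a three-bit field; the tbz x15, #0/#1/#2 tests in the kernel check exactly the bits set in the constructor. A sketch of the same layout, with enumerator names taken from the constructor comments:

    #include <cstdint>

    // Bit positions mirror the KernelArgs constructor:
    //   bit 0 - fill ZA from the accumulator buffer (accumulate == true)
    //   bit 1 - store ZA back to the accumulator buffer (C == nullptr)
    //   bit 2 - per-channel requantisation (rq.per_channel_requant)
    enum KernelFlags : uint64_t
    {
        FILL_ACCUMULATORS_FROM_BUFFER = 1u << 0,
        STORE_ACCUMULATORS_TO_BUFFER  = 1u << 1,
        PER_CHANNEL_QUANTISATION      = 1u << 2,
    };

    // Example: partial-result block with per-channel quantisation, no prior accumulation.
    // uint64_t flags = STORE_ACCUMULATORS_TO_BUFFER | PER_CHANNEL_QUANTISATION;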
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000..420c219
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8q_mopa_4VLx1VL
+{
+public:
+  typedef int8_t operand_type;
+  typedef int8_t result_type;
+
+  typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<int32_t>() * 4;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<int32_t>() * 1;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return false;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_s8q_mopa_4VLx1VL;
+
+  StdTransformsSME<operand_type, result_type, 4, 1, 4, true> transforms = {};
+
+  cls_sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
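Note: as in the 2VLx2VL variant, K is consumed in groups of k_unroll() = 4 int8 values per SMOPA, and the main K loop is further unrolled by four such groups. The register arithmetic in the prologue of both generic.cpp files (add #0x3, lsr #0x2, followed by lsr #0x2 / and #0x3) is equivalent to this sketch:

    // Split K into quad-unrolled main-loop iterations plus leftover groups,
    // mirroring the K-loop prologue in the generated assembly.
    static void split_k(int K, int &main_iters, int &oddment_groups)
    {
        const int k_groups = (K + 3) / 4;  // ceil(K / k_unroll), with k_unroll == 4
        main_iters         = k_groups / 4; // each main-loop pass consumes 4 groups
        oddment_groups     = k_groups % 4; // remaining groups handled one at a time
    }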
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000..a738a10
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const int8_t *const A,
+      const int8_t *const B,
+      int8_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+      const Requantize32 &rq,
+      const int n_0,
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+        C(C), ldcb(ldc * sizeof(int8_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias), n_0(n_0),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (rq.per_channel_requant)
+      {
+        flags |= 1 << 2;  // PER_CHANNEL_QUANTISATION
+      }
+    }
+
+    const int8_t *const A;
+    const int8_t *const B;
+    const long kstride_bytes;
+    int8_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    int32_t min = std::numeric_limits<int8_t>::min();
+    int32_t max = std::numeric_limits<int8_t>::max();
+
+    const int32_t *const bias;
+    const int n_0;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c1dc  // ld1w { z28.s-z31.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840780  // mova za0h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa041c1cc  // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840682  // mova za2h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xa043c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840703  // mova za3h.s[x12], { z24.s-z27.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      "whilelt p0.s, x9, x28\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "ldnt1w { z15.s }, p0/Z, [x19, x9, LSL #2]\n"
+      ".inst 0xc09025e0  // addha za0.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e1  // addha za1.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e2  // addha za2.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e3  // addha za3.s, p1/M, p1/M, z15.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19\n"
+      "incw x20, ALL, MUL #4\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1408352  // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+      "ldnt1b { z0.b }, p1/Z, [x22]\n"
+      ".inst 0xa1418353  // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa1428350  // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa1438342  // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa0802640  // smopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa08026c1  // smopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
+      ".inst 0xa0802742  // smopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
+      ".inst 0xa08027c3  // smopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
+      ".inst 0xa1408352  // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+      ".inst 0xa0892660  // smopa za0.s, p1/M, p1/M, z19.b, z9.b\n"
+      "ldnt1b { z0.b }, p1/Z, [x22]\n"
+      ".inst 0xa08926e1  // smopa za1.s, p1/M, p1/M, z23.b, z9.b\n"
+      ".inst 0xa0892762  // smopa za2.s, p1/M, p1/M, z27.b, z9.b\n"
+      ".inst 0xa08927e3  // smopa za3.s, p1/M, p1/M, z31.b, z9.b\n"
+      ".inst 0xa1418353  // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa0952600  // smopa za0.s, p1/M, p1/M, z16.b, z21.b\n"
+      "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa0952681  // smopa za1.s, p1/M, p1/M, z20.b, z21.b\n"
+      ".inst 0xa0952702  // smopa za2.s, p1/M, p1/M, z24.b, z21.b\n"
+      ".inst 0xa0952783  // smopa za3.s, p1/M, p1/M, z28.b, z21.b\n"
+      ".inst 0xa1428350  // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa08c2440  // smopa za0.s, p1/M, p1/M, z2.b, z12.b\n"
+      ".inst 0xa08c24c1  // smopa za1.s, p1/M, p1/M, z6.b, z12.b\n"
+      ".inst 0xa08c2542  // smopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
+      ".inst 0xa08c25c3  // smopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
+      ".inst 0xa1438342  // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa0802640  // smopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
+      ".inst 0xa08026c1  // smopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
+      ".inst 0xa0802742  // smopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
+      ".inst 0xa08027c3  // smopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
+      ".inst 0xa0892660  // smopa za0.s, p1/M, p1/M, z19.b, z9.b\n"
+      ".inst 0xa08926e1  // smopa za1.s, p1/M, p1/M, z23.b, z9.b\n"
+      ".inst 0xa0892762  // smopa za2.s, p1/M, p1/M, z27.b, z9.b\n"
+      ".inst 0xa08927e3  // smopa za3.s, p1/M, p1/M, z31.b, z9.b\n"
+      ".inst 0xa0952600  // smopa za0.s, p1/M, p1/M, z16.b, z21.b\n"
+      ".inst 0xa0952681  // smopa za1.s, p1/M, p1/M, z20.b, z21.b\n"
+      ".inst 0xa0952702  // smopa za2.s, p1/M, p1/M, z24.b, z21.b\n"
+      ".inst 0xa0952783  // smopa za3.s, p1/M, p1/M, z28.b, z21.b\n"
+      ".inst 0xa08c2440  // smopa za0.s, p1/M, p1/M, z2.b, z12.b\n"
+      ".inst 0xa08c24c1  // smopa za1.s, p1/M, p1/M, z6.b, z12.b\n"
+      ".inst 0xa08c2542  // smopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
+      ".inst 0xa08c25c3  // smopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa1408352  // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #4\n"
+      "ld1b { z0.b }, p1/Z, [x22]\n"
+      "addvl x22, x22, #1\n"
+      ".inst 0xa0802640  // smopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
+      ".inst 0xa08026c1  // smopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
+      ".inst 0xa0802742  // smopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
+      ".inst 0xa08027c3  // smopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      ".inst 0xa040c340  // ld1w { z0.s-z3.s }, pn8.b/Z, [x26]\n"
+      "addvl x26, x26, #4\n"
+      ".inst 0xc0912400  // addva za0.s, p1/M, p1/M, z0.s\n"
+      ".inst 0xc0912421  // addva za1.s, p1/M, p1/M, z1.s\n"
+      ".inst 0xc0912442  // addva za2.s, p1/M, p1/M, z2.s\n"
+      ".inst 0xc0912463  // addva za3.s, p1/M, p1/M, z3.s\n"
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0840680  // mova za0h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xa041c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840481  // mova za1h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc086047c  // mova { z28.s-z31.s }, za3h.s[x12]\n"
+      ".inst 0xa042c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840683  // mova za3h.s[x12], { z20.s-z23.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c1b0  // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c1a8  // st1w { z8.s-z11.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1bc  // st1w { z28.s-z31.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 30f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa060c1b0  // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xa061c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c1b4  // st1w { z20.s-z23.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1b8  // st1w { z24.s-z27.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 30f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+      "ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+      "ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+      "tbz x15, #2, 15f\n"
+      "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+      "add x20, x20, x9\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      "ld1w { z8.s }, p0/Z, [x19]\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      "ld1w { z7.s }, p0/Z, [x19]\n"
+      "15:"  // Store to output array: Load per-channel parameters: End
+      "cntw x22\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 17f\n"
+      "16:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc1a8ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a4ccac  // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
+      "st1b { z12.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z13.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z14.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z15.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 16b\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 18f\n"
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc1a8ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
+      ".inst 0xc1a6ab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+      ".inst 0xc1a4ccb0  // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
+      "st1b { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "st1b { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "18:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 20f\n"
+      "19:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      ".inst 0xc1a8ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+      ".inst 0xc1a4ccb0  // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
+      "st1b { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 19b\n"
+      "20:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 21f\n"
+      ".inst 0xc086043c  // mova { z28.s-z31.s }, za1h.s[x12]\n"
+      ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a4ccbc  // sclamp { z28.s-z31.s }, z5.s, z4.s\n"
+      "st1b { z28.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z29.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "st1b { z30.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "21:"  // Store to output array: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 23f\n"
+      "22:"  // Store to output array: Accumulator row 2 loop
+      ".inst 0xc0860458  // mova { z24.s-z27.s }, za2h.s[x12]\n"
+      ".inst 0xc1a8ac18  // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa38  // srshl { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab18  // add { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
+      ".inst 0xc1a4ccb8  // sclamp { z24.s-z27.s }, z5.s, z4.s\n"
+      "st1b { z24.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z25.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z26.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z27.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 22b\n"
+      "23:"  // Store to output array: Accumulator row 2 oddments
+      "cbz x19, 24f\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc1a8ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a4ccac  // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
+      "st1b { z12.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z13.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      "st1b { z14.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "24:"  // Store to output array: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 26f\n"
+      "25:"  // Store to output array: Accumulator row 3 loop
+      ".inst 0xc0860474  // mova { z20.s-z23.s }, za3h.s[x12]\n"
+      ".inst 0xc1a8ac14  // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa34  // srshl { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab14  // add { z20.s-z23.s }, { z20.s-z23.s }, z6.s\n"
+      ".inst 0xc1a4ccb4  // sclamp { z20.s-z23.s }, z5.s, z4.s\n"
+      "st1b { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z23.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 25b\n"
+      "26:"  // Store to output array: Accumulator row 3 oddments
+      "cbz x19, 27f\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xc1a8ac00  // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa20  // srshl { z0.s-z3.s }, { z0.s-z3.s }, z7.s\n"
+      ".inst 0xc1a6ab00  // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+      ".inst 0xc1a4cca0  // sclamp { z0.s-z3.s }, z5.s, z4.s\n"
+      "st1b { z0.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z1.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      "st1b { z2.s }, p0, [x25]\n"
+      "27:"  // Store to output array: Accumulator row 3 oddments: End
+      "28:"  // Store to output array: End
+      "tbz x15, #0, 30f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "29:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840480  // mova za0h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa041c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 29b\n"
+      "30:"  // End block
+      "incw x9\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #4\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
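Note: the store phase of both s8q kernels above applies the same per-element requantisation sequence (SQDMULH by the multiplier, SRSHL, add c_offset, SCLAMP) before the byte stores narrow the result to int8. A scalar sketch of that pipeline, treating the shift as a plain non-negative rounding right shift (the kernel encodes it as an SRSHL operand, so the sign convention there may differ):

    #include <algorithm>
    #include <cstdint>

    // Scalar approximation of the vector requantisation used in the store phase.
    static int8_t requantize_one(int32_t acc, int32_t mul, int shift,
                                 int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // SQDMULH: high 32 bits of the doubled 64-bit product (saturation is ignored
        // here; it only matters for acc == mul == INT32_MIN).
        const int32_t high = static_cast<int32_t>((2 * static_cast<int64_t>(acc) * mul) >> 32);

        // SRSHL by a negative amount acts as a rounding arithmetic shift right.
        const int32_t rounded = (shift > 0) ? ((high + (1 << (shift - 1))) >> shift) : high;

        // Add the output offset, then clamp to the int8 range carried in minval/maxval.
        return static_cast<int8_t>(std::clamp(rounded + c_offset, minval, maxval));
    }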
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000..c969c7a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL
+{
+public:
+  typedef int8_t operand_type;
+  typedef int32_t result_type;
+
+  typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<int32_t>() * 1;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<int32_t>() * 4;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL;
+
+  StdTransformsSME<operand_type, result_type, 1, 4, 4> transforms = {};
+
+  cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
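Note: unlike the s8q variants above, this kernel leaves raw int32 accumulators in C (result_type is int32_t), so there is no Requantize32 argument and supports_accumulate() is true; the Activation parameter is accepted but unused. Ignoring the interleaved storage layout, the per-element result is the usual int8 GEMM with an optional per-column bias, roughly:

    #include <cstdint>

    // Scalar reference for one output element (a sketch; the kernel itself works on
    // interleaved A/B buffers and whole ZA tiles rather than single elements).
    static int32_t gemm_s8s32_element(const int8_t *A_row, const int8_t *B_col,
                                      int K, int32_t bias)
    {
        int32_t acc = bias; // treat bias as zero when no bias pointer is supplied
        for (int k = 0; k < K; ++k)
        {
            acc += static_cast<int32_t>(A_row[k]) * static_cast<int32_t>(B_col[k]);
        }
        return acc;
    }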
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000..7ddd7c2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer)
+{
+  ARM_COMPUTE_UNUSED(act);
+
+  struct KernelArgs
+  {
+    KernelArgs(
+      const int8_t *const A,
+      const int8_t *const B,
+      int32_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+        C(C), ldcb(ldc * sizeof(int32_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+    }
+
+    const int8_t *const A;
+    const int8_t *const B;
+    const long kstride_bytes;
+    int32_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+
+    const int32_t *const bias;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x11, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p0.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x9, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x11, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c544  // ld1w { z4.s-z7.s }, pn9.b/Z, [x10]\n"
+      ".inst 0xc0840480  // mova za0h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa041c55c  // ld1w { z28.s-z31.s }, pn9.b/Z, [x10, #0x4, MUL VL]\n"
+      ".inst 0xc0840781  // mova za1h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa042c550  // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c540  // ld1w { z0.s-z3.s }, pn9.b/Z, [x10, #0xc, MUL VL]\n"
+      ".inst 0xc0840403  // mova za3h.s[x12], { z0.s-z3.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x10, x10, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w28, [%x[args], %[offsetof_M]]\n"
+      "mov x27, #0x0\n"
+      "mov x26, #0x0\n"
+      "ldr w25, [%x[args], %[offsetof_N]]\n"
+      "ldr x24, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x23, x24\n"
+      ".inst 0x25b96750  // whilelt pn8.s, x26, x25, VLx4\n"
+      "tbnz x11, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      ".inst 0xa11ac26a  // ldnt1w { z2.s, z6.s, z10.s, z14.s }, p8/Z, [x19, x26, LSL #2]\n"
+      ".inst 0xc0900040  // addha za0.s, p0/M, p0/M, z2.s\n"
+      ".inst 0xc09000c1  // addha za1.s, p0/M, p0/M, z6.s\n"
+      ".inst 0xc0900142  // addha za2.s, p0/M, p0/M, z10.s\n"
+      ".inst 0xc09001c3  // addha za3.s, p0/M, p0/M, z14.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x26\n"
+      "mov x20, x27\n"
+      "incw x19, ALL, MUL #4\n"
+      "incw x20\n"
+      "cmp x19, x25\n"
+      "csel x20, x27, x20, LT\n"
+      "mov x19, x11\n"
+      "bfm x11, XZR, #0x0, #0x0  // bfc x11, #0x0, #0x1\n"
+      "cmp x20, x28\n"
+      "csel x11, x19, x11, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x26, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      "ld1b { z20.b }, p0/Z, [x23]\n"
+      ".inst 0xa14086c9  // ldnt1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x22]\n"
+      "ld1b { z10.b }, p0/Z, [x23, #1, MUL VL]\n"
+      ".inst 0xa14186da  // ldnt1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      "ld1b { z16.b }, p0/Z, [x23, #2, MUL VL]\n"
+      ".inst 0xa14286cb  // ldnt1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      "ld1b { z25.b }, p0/Z, [x23, #3, MUL VL]\n"
+      "addvl x23, x23, #4\n"
+      ".inst 0xa14386c8  // ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa0810280  // smopa za0.s, p0/M, p0/M, z20.b, z1.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa0850281  // smopa za1.s, p0/M, p0/M, z20.b, z5.b\n"
+      ".inst 0xa0890282  // smopa za2.s, p0/M, p0/M, z20.b, z9.b\n"
+      ".inst 0xa08d0283  // smopa za3.s, p0/M, p0/M, z20.b, z13.b\n"
+      "ld1b { z20.b }, p0/Z, [x23]\n"
+      ".inst 0xa0920140  // smopa za0.s, p0/M, p0/M, z10.b, z18.b\n"
+      ".inst 0xa14086c9  // ldnt1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa0960141  // smopa za1.s, p0/M, p0/M, z10.b, z22.b\n"
+      ".inst 0xa09a0142  // smopa za2.s, p0/M, p0/M, z10.b, z26.b\n"
+      ".inst 0xa09e0143  // smopa za3.s, p0/M, p0/M, z10.b, z30.b\n"
+      "ld1b { z10.b }, p0/Z, [x23, #1, MUL VL]\n"
+      ".inst 0xa0830200  // smopa za0.s, p0/M, p0/M, z16.b, z3.b\n"
+      ".inst 0xa14186da  // ldnt1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa0870201  // smopa za1.s, p0/M, p0/M, z16.b, z7.b\n"
+      ".inst 0xa08b0202  // smopa za2.s, p0/M, p0/M, z16.b, z11.b\n"
+      ".inst 0xa08f0203  // smopa za3.s, p0/M, p0/M, z16.b, z15.b\n"
+      "ld1b { z16.b }, p0/Z, [x23, #2, MUL VL]\n"
+      ".inst 0xa14286cb  // ldnt1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      ".inst 0xa0800320  // smopa za0.s, p0/M, p0/M, z25.b, z0.b\n"
+      ".inst 0xa0840321  // smopa za1.s, p0/M, p0/M, z25.b, z4.b\n"
+      ".inst 0xa0880322  // smopa za2.s, p0/M, p0/M, z25.b, z8.b\n"
+      ".inst 0xa08c0323  // smopa za3.s, p0/M, p0/M, z25.b, z12.b\n"
+      "ld1b { z25.b }, p0/Z, [x23, #3, MUL VL]\n"
+      "addvl x23, x23, #4\n"
+      ".inst 0xa14386c8  // ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa0810280  // smopa za0.s, p0/M, p0/M, z20.b, z1.b\n"
+      ".inst 0xa0850281  // smopa za1.s, p0/M, p0/M, z20.b, z5.b\n"
+      ".inst 0xa0890282  // smopa za2.s, p0/M, p0/M, z20.b, z9.b\n"
+      ".inst 0xa08d0283  // smopa za3.s, p0/M, p0/M, z20.b, z13.b\n"
+      ".inst 0xa0920140  // smopa za0.s, p0/M, p0/M, z10.b, z18.b\n"
+      ".inst 0xa0960141  // smopa za1.s, p0/M, p0/M, z10.b, z22.b\n"
+      ".inst 0xa09a0142  // smopa za2.s, p0/M, p0/M, z10.b, z26.b\n"
+      ".inst 0xa09e0143  // smopa za3.s, p0/M, p0/M, z10.b, z30.b\n"
+      ".inst 0xa0830200  // smopa za0.s, p0/M, p0/M, z16.b, z3.b\n"
+      ".inst 0xa0870201  // smopa za1.s, p0/M, p0/M, z16.b, z7.b\n"
+      ".inst 0xa08b0202  // smopa za2.s, p0/M, p0/M, z16.b, z11.b\n"
+      ".inst 0xa08f0203  // smopa za3.s, p0/M, p0/M, z16.b, z15.b\n"
+      ".inst 0xa0800320  // smopa za0.s, p0/M, p0/M, z25.b, z0.b\n"
+      ".inst 0xa0840321  // smopa za1.s, p0/M, p0/M, z25.b, z4.b\n"
+      ".inst 0xa0880322  // smopa za2.s, p0/M, p0/M, z25.b, z8.b\n"
+      ".inst 0xa08c0323  // smopa za3.s, p0/M, p0/M, z25.b, z12.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      "ld1b { z20.b }, p0/Z, [x23]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x23, x23, #1\n"
+      ".inst 0xa14086c1  // ld1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #4\n"
+      ".inst 0xa0810280  // smopa za0.s, p0/M, p0/M, z20.b, z1.b\n"
+      ".inst 0xa0850281  // smopa za1.s, p0/M, p0/M, z20.b, z5.b\n"
+      ".inst 0xa0890282  // smopa za2.s, p0/M, p0/M, z20.b, z9.b\n"
+      ".inst 0xa08d0283  // smopa za3.s, p0/M, p0/M, z20.b, z13.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x11, #1, 14f\n"
+      "tbz x11, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c55c  // ld1w { z28.s-z31.s }, pn9.b/Z, [x10]\n"
+      ".inst 0xc0860404  // mova { z4.s-z7.s }, za0h.s[x12]\n"
+      ".inst 0xc0840780  // mova za0h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xc0860434  // mova { z20.s-z23.s }, za1h.s[x12]\n"
+      ".inst 0xa041c540  // ld1w { z0.s-z3.s }, pn9.b/Z, [x10, #0x4, MUL VL]\n"
+      ".inst 0xc0840401  // mova za1h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xc0860458  // mova { z24.s-z27.s }, za2h.s[x12]\n"
+      ".inst 0xc086047c  // mova { z28.s-z31.s }, za3h.s[x12]\n"
+      ".inst 0xa042c550  // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c54c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x10, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c524  // st1w { z4.s-z7.s }, pn9.b, [x9]\n"
+      "addvl x10, x10, #16\n"
+      ".inst 0xa061c534  // st1w { z20.s-z23.s }, pn9.b, [x9, #0x4, MUL VL]\n"
+      ".inst 0xa062c538  // st1w { z24.s-z27.s }, pn9.b, [x9, #0x8, MUL VL]\n"
+      ".inst 0xa063c53c  // st1w { z28.s-z31.s }, pn9.b, [x9, #0xc, MUL VL]\n"
+      "addvl x9, x9, #16\n"
+      "blt 11b\n"
+      "b 20f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860414  // mova { z20.s-z23.s }, za0h.s[x12]\n"
+      ".inst 0xc0860420  // mova { z0.s-z3.s }, za1h.s[x12]\n"
+      ".inst 0xa060c534  // st1w { z20.s-z23.s }, pn9.b, [x9]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa061c520  // st1w { z0.s-z3.s }, pn9.b, [x9, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c528  // st1w { z8.s-z11.s }, pn9.b, [x9, #0x8, MUL VL]\n"
+      ".inst 0xa063c52c  // st1w { z12.s-z15.s }, pn9.b, [x9, #0xc, MUL VL]\n"
+      "addvl x9, x9, #16\n"
+      "blt 13b\n"
+      "b 20f\n"
+      "14:"  // Store to output array
+      "ldr x22, [%x[args], %[offsetof_C]]\n"
+      "sub x20, x28, x27\n"
+      "cntw x19\n"
+      "ldr x21, [%x[args], %[offsetof_ldcb]]\n"
+      "cmp x20, x19\n"
+      "csel x19, x20, x19, LT\n"
+      "add x22, x22, x26, LSL #2\n"  // C += n
+      "lsr x20, x19, #0x2\n"
+      "madd x22, x27, x21, x22\n"  // C += m * ldc
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa160c2c0  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x22]\n"
+      "add x22, x22, x21\n"
+      ".inst 0xa160c2c1  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x22]\n"
+      "add x22, x22, x21\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa160c2c2  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x22]\n"
+      "add x22, x22, x21\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa160c2c3  // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x22]\n"
+      "add x22, x22, x21\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa160c2c0  // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x22]\n"
+      "add x22, x22, x21\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa160c2c1  // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x22]\n"
+      "add x22, x22, x21\n"
+      "beq 17f\n"
+      ".inst 0xa160c2c2  // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x22]\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments: End
+      "18:"  // Store to output array: End
+      "tbz x11, #0, 20f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "19:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c54c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x10]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa041c550  // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c550  // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c544  // ld1w { z4.s-z7.s }, pn9.b/Z, [x10, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x10, x10, #16\n"
+      "blt 19b\n"
+      "20:"  // End block
+      "incw x26, ALL, MUL #4\n"
+      "cmp x26, x25\n"
+      "blt 3b\n"
+      "incw x27\n"
+      "cmp x27, x28\n"
+      "mov x26, #0x0\n"
+      "mov x24, x23\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
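
The flags word assembled in KernelArgs above is what the "tbz x11, #0" / "tbz x11, #1" tests in the assembly inspect: bit 0 asks the kernel to pre-fill ZA from accumulator_buffer, bit 1 asks it to spill the accumulators back to that buffer instead of storing to C. A minimal sketch of that encoding, using hypothetical names that simply mirror the two "flags |=" statements in the constructor:

    #include <cstdint>

    enum KernelFlags : uint64_t
    {
        FILL_ACCUMULATORS_FROM_BUFFER = uint64_t(1) << 0, // set when accumulate == true
        STORE_ACCUMULATORS_TO_BUFFER  = uint64_t(1) << 1, // set when C == nullptr
    };

    // Hypothetical helper restating the constructor logic.
    uint64_t make_flags(bool accumulate, const int32_t *C)
    {
        uint64_t flags = 0;
        if (accumulate)   flags |= FILL_ACCUMULATORS_FROM_BUFFER;
        if (C == nullptr) flags |= STORE_ACCUMULATORS_TO_BUFFER;
        return flags;
    }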
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000..a0705e5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL
+{
+public:
+  typedef int8_t operand_type;
+  typedef int32_t result_type;
+
+  typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<int32_t>() * 2;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<int32_t>() * 2;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL;
+
+  StdTransformsSME<operand_type, result_type, 2, 2, 4> transforms = {};
+
+  cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000..9ae18f0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer)
+{
+  ARM_COMPUTE_UNUSED(act);
+
+  struct KernelArgs
+  {
+    KernelArgs(
+      const int8_t *const A,
+      const int8_t *const B,
+      int32_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+        C(C), ldcb(ldc * sizeof(int32_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+    }
+
+    const int8_t *const A;
+    const int8_t *const B;
+    const long kstride_bytes;
+    int32_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+
+    const int32_t *const bias;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p0.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5c8  // ld1w { z8.s-z11.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840500  // mova za0h.s[x12], { z8.s-z11.s }\n"
+      ".inst 0xa041c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa042c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840782  // mova za2h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa043c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840603  // mova za3h.s[x12], { z16.s-z19.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      ".inst 0x25bc4530  // whilelt pn8.s, x9, x28, VLx2\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      ".inst 0xa109427c  // ldnt1w { z20.s, z28.s }, p8/Z, [x19, x9, LSL #2]\n"
+      ".inst 0xc0900280  // addha za0.s, p0/M, p0/M, z20.s\n"
+      ".inst 0xc0900381  // addha za1.s, p0/M, p0/M, z28.s\n"
+      ".inst 0xc0900282  // addha za2.s, p0/M, p0/M, z20.s\n"
+      ".inst 0xc0900383  // addha za3.s, p0/M, p0/M, z28.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19, ALL, MUL #2\n"
+      "incw x20, ALL, MUL #2\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1400756  // ld1b { z22.b, z30.b }, pn9.b/Z, [x26]\n"
+      ".inst 0xa14006d9  // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa1410750  // ld1b { z16.b, z24.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa14106cb  // ldnt1b { z3.b, z11.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa0420748  // ld1b { z8.b-z9.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa04206d3  // ldnt1b { z18.b-z19.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa0430744  // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14306dd  // ldnt1b { z21.b, z29.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa09102c0  // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa09902c1  // smopa za1.s, p0/M, p0/M, z22.b, z25.b\n"
+      ".inst 0xa09103c2  // smopa za2.s, p0/M, p0/M, z30.b, z17.b\n"
+      ".inst 0xa09903c3  // smopa za3.s, p0/M, p0/M, z30.b, z25.b\n"
+      ".inst 0xa1400756  // ld1b { z22.b, z30.b }, pn9.b/Z, [x26]\n"
+      ".inst 0xa0830200  // smopa za0.s, p0/M, p0/M, z16.b, z3.b\n"
+      ".inst 0xa14006d9  // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa08b0201  // smopa za1.s, p0/M, p0/M, z16.b, z11.b\n"
+      ".inst 0xa0830302  // smopa za2.s, p0/M, p0/M, z24.b, z3.b\n"
+      ".inst 0xa08b0303  // smopa za3.s, p0/M, p0/M, z24.b, z11.b\n"
+      ".inst 0xa1410750  // ld1b { z16.b, z24.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa0920100  // smopa za0.s, p0/M, p0/M, z8.b, z18.b\n"
+      ".inst 0xa14106cb  // ldnt1b { z3.b, z11.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa0930101  // smopa za1.s, p0/M, p0/M, z8.b, z19.b\n"
+      ".inst 0xa0920122  // smopa za2.s, p0/M, p0/M, z9.b, z18.b\n"
+      ".inst 0xa0930123  // smopa za3.s, p0/M, p0/M, z9.b, z19.b\n"
+      ".inst 0xa0420748  // ld1b { z8.b-z9.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa04206d3  // ldnt1b { z18.b-z19.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa0950080  // smopa za0.s, p0/M, p0/M, z4.b, z21.b\n"
+      ".inst 0xa09d0081  // smopa za1.s, p0/M, p0/M, z4.b, z29.b\n"
+      ".inst 0xa09500a2  // smopa za2.s, p0/M, p0/M, z5.b, z21.b\n"
+      ".inst 0xa09d00a3  // smopa za3.s, p0/M, p0/M, z5.b, z29.b\n"
+      ".inst 0xa0430744  // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14306dd  // ldnt1b { z21.b, z29.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa09102c0  // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
+      ".inst 0xa09902c1  // smopa za1.s, p0/M, p0/M, z22.b, z25.b\n"
+      ".inst 0xa09103c2  // smopa za2.s, p0/M, p0/M, z30.b, z17.b\n"
+      ".inst 0xa09903c3  // smopa za3.s, p0/M, p0/M, z30.b, z25.b\n"
+      ".inst 0xa0830200  // smopa za0.s, p0/M, p0/M, z16.b, z3.b\n"
+      ".inst 0xa08b0201  // smopa za1.s, p0/M, p0/M, z16.b, z11.b\n"
+      ".inst 0xa0830302  // smopa za2.s, p0/M, p0/M, z24.b, z3.b\n"
+      ".inst 0xa08b0303  // smopa za3.s, p0/M, p0/M, z24.b, z11.b\n"
+      ".inst 0xa0920100  // smopa za0.s, p0/M, p0/M, z8.b, z18.b\n"
+      ".inst 0xa0930101  // smopa za1.s, p0/M, p0/M, z8.b, z19.b\n"
+      ".inst 0xa0920122  // smopa za2.s, p0/M, p0/M, z9.b, z18.b\n"
+      ".inst 0xa0930123  // smopa za3.s, p0/M, p0/M, z9.b, z19.b\n"
+      ".inst 0xa0950080  // smopa za0.s, p0/M, p0/M, z4.b, z21.b\n"
+      ".inst 0xa09d0081  // smopa za1.s, p0/M, p0/M, z4.b, z29.b\n"
+      ".inst 0xa09500a2  // smopa za2.s, p0/M, p0/M, z5.b, z21.b\n"
+      ".inst 0xa09d00a3  // smopa za3.s, p0/M, p0/M, z5.b, z29.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa1400756  // ld1b { z22.b, z30.b }, pn9.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xa14006d1  // ld1b { z17.b, z25.b }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #2\n"
+      ".inst 0xa09102c0  // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
+      ".inst 0xa09902c1  // smopa za1.s, p0/M, p0/M, z22.b, z25.b\n"
+      ".inst 0xa09103c2  // smopa za2.s, p0/M, p0/M, z30.b, z17.b\n"
+      ".inst 0xa09903c3  // smopa za3.s, p0/M, p0/M, z30.b, z25.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0860418  // mova { z24.s-z27.s }, za0h.s[x12]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc0860420  // mova { z0.s-z3.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860468  // mova { z8.s-z11.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840582  // mova za2h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa043c5c4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c5b8  // st1w { z24.s-z27.s }, pn9.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c5a0  // st1w { z0.s-z3.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c5bc  // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 23f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa060c5a0  // st1w { z0.s-z3.s }, pn9.b, [x13]\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860464  // mova { z4.s-z7.s }, za3h.s[x12]\n"
+      ".inst 0xa061c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c5bc  // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5a4  // st1w { z4.s-z7.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 23f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "sub x24, x11, x10\n"
+      "cntw x23\n"
+      "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+      "cmp x24, x23\n"
+      "csel x21, x24, x23, LT\n"
+      "add x25, x25, x9, LSL #2\n"  // C += n
+      "lsr x20, x21, #0x2\n"
+      "madd x25, x10, x22, x25\n"  // C += m * ldc
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0860438  // mova { z24.s-z27.s }, za1h.s[x12]\n"
+      ".inst 0xa1604330  // st1w { z16.s, z24.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      ".inst 0xa1604331  // st1w { z17.s, z25.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604332  // st1w { z18.s, z26.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604333  // st1w { z19.s, z27.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xa1604320  // st1w { z0.s, z8.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604321  // st1w { z1.s, z9.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 17f\n"
+      ".inst 0xa1604322  // st1w { z2.s, z10.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 21f\n"
+      "cmp x24, x23\n"
+      "csel x19, x24, x23, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 19f\n"
+      "18:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xa1604330  // st1w { z16.s, z24.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      ".inst 0xa1604331  // st1w { z17.s, z25.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xa1604332  // st1w { z18.s, z26.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xa1604333  // st1w { z19.s, z27.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "blt 18b\n"
+      "19:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa1604324  // st1w { z4.s, z12.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xa1604325  // st1w { z5.s, z13.s }, p8, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 20f\n"
+      ".inst 0xa1604326  // st1w { z6.s, z14.s }, p8, [x25]\n"
+      "20:"  // Store to output array: Accumulator row 1 oddments: End
+      "21:"  // Store to output array: End
+      "tbz x15, #0, 23f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "22:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 22b\n"
+      "23:"  // End block
+      "incw x9, ALL, MUL #2\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #2\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000..be1106d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL
+{
+public:
+  typedef int8_t operand_type;
+  typedef int32_t result_type;
+
+  typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<int32_t>() * 4;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<int32_t>() * 1;
+  }
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL;
+
+  StdTransformsSME<operand_type, result_type, 4, 1, 4> transforms = {};
+
+  cls_sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000..3623f5b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer)
+{
+  ARM_COMPUTE_UNUSED(act);
+
+  struct KernelArgs
+  {
+    KernelArgs(
+      const int8_t *const A,
+      const int8_t *const B,
+      int32_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+        C(C), ldcb(ldc * sizeof(int32_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+    }
+
+    const int8_t *const A;
+    const int8_t *const B;
+    const long kstride_bytes;
+    int32_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+
+    const int32_t *const bias;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c1dc  // ld1w { z28.s-z31.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840780  // mova za0h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa041c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa042c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      "whilelt p0.s, x9, x28\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "ldnt1w { z15.s }, p0/Z, [x19, x9, LSL #2]\n"
+      ".inst 0xc09025e0  // addha za0.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e1  // addha za1.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e2  // addha za2.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e3  // addha za3.s, p1/M, p1/M, z15.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19\n"
+      "incw x20, ALL, MUL #4\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa0408350  // ld1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+      "ldnt1b { z7.b }, p1/Z, [x22]\n"
+      ".inst 0xa041835c  // ld1b { z28.b-z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      "ldnt1b { z13.b }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa0428340  // ld1b { z0.b-z3.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1b { z12.b }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa0438358  // ld1b { z24.b-z27.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1b { z23.b }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa0872600  // smopa za0.s, p1/M, p1/M, z16.b, z7.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa0872621  // smopa za1.s, p1/M, p1/M, z17.b, z7.b\n"
+      ".inst 0xa0872642  // smopa za2.s, p1/M, p1/M, z18.b, z7.b\n"
+      ".inst 0xa0872663  // smopa za3.s, p1/M, p1/M, z19.b, z7.b\n"
+      ".inst 0xa0408350  // ld1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+      ".inst 0xa08d2780  // smopa za0.s, p1/M, p1/M, z28.b, z13.b\n"
+      "ldnt1b { z7.b }, p1/Z, [x22]\n"
+      ".inst 0xa08d27a1  // smopa za1.s, p1/M, p1/M, z29.b, z13.b\n"
+      ".inst 0xa08d27c2  // smopa za2.s, p1/M, p1/M, z30.b, z13.b\n"
+      ".inst 0xa08d27e3  // smopa za3.s, p1/M, p1/M, z31.b, z13.b\n"
+      ".inst 0xa041835c  // ld1b { z28.b-z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa08c2400  // smopa za0.s, p1/M, p1/M, z0.b, z12.b\n"
+      "ldnt1b { z13.b }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa08c2421  // smopa za1.s, p1/M, p1/M, z1.b, z12.b\n"
+      ".inst 0xa08c2442  // smopa za2.s, p1/M, p1/M, z2.b, z12.b\n"
+      ".inst 0xa08c2463  // smopa za3.s, p1/M, p1/M, z3.b, z12.b\n"
+      ".inst 0xa0428340  // ld1b { z0.b-z3.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1b { z12.b }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa0972700  // smopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
+      ".inst 0xa0972721  // smopa za1.s, p1/M, p1/M, z25.b, z23.b\n"
+      ".inst 0xa0972742  // smopa za2.s, p1/M, p1/M, z26.b, z23.b\n"
+      ".inst 0xa0972763  // smopa za3.s, p1/M, p1/M, z27.b, z23.b\n"
+      ".inst 0xa0438358  // ld1b { z24.b-z27.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1b { z23.b }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa0872600  // smopa za0.s, p1/M, p1/M, z16.b, z7.b\n"
+      ".inst 0xa0872621  // smopa za1.s, p1/M, p1/M, z17.b, z7.b\n"
+      ".inst 0xa0872642  // smopa za2.s, p1/M, p1/M, z18.b, z7.b\n"
+      ".inst 0xa0872663  // smopa za3.s, p1/M, p1/M, z19.b, z7.b\n"
+      ".inst 0xa08d2780  // smopa za0.s, p1/M, p1/M, z28.b, z13.b\n"
+      ".inst 0xa08d27a1  // smopa za1.s, p1/M, p1/M, z29.b, z13.b\n"
+      ".inst 0xa08d27c2  // smopa za2.s, p1/M, p1/M, z30.b, z13.b\n"
+      ".inst 0xa08d27e3  // smopa za3.s, p1/M, p1/M, z31.b, z13.b\n"
+      ".inst 0xa08c2400  // smopa za0.s, p1/M, p1/M, z0.b, z12.b\n"
+      ".inst 0xa08c2421  // smopa za1.s, p1/M, p1/M, z1.b, z12.b\n"
+      ".inst 0xa08c2442  // smopa za2.s, p1/M, p1/M, z2.b, z12.b\n"
+      ".inst 0xa08c2463  // smopa za3.s, p1/M, p1/M, z3.b, z12.b\n"
+      ".inst 0xa0972700  // smopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
+      ".inst 0xa0972721  // smopa za1.s, p1/M, p1/M, z25.b, z23.b\n"
+      ".inst 0xa0972742  // smopa za2.s, p1/M, p1/M, z26.b, z23.b\n"
+      ".inst 0xa0972763  // smopa za3.s, p1/M, p1/M, z27.b, z23.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa0408350  // ld1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #4\n"
+      "ld1b { z7.b }, p1/Z, [x22]\n"
+      "addvl x22, x22, #1\n"
+      ".inst 0xa0872600  // smopa za0.s, p1/M, p1/M, z16.b, z7.b\n"
+      ".inst 0xa0872621  // smopa za1.s, p1/M, p1/M, z17.b, z7.b\n"
+      ".inst 0xa0872642  // smopa za2.s, p1/M, p1/M, z18.b, z7.b\n"
+      ".inst 0xa0872663  // smopa za3.s, p1/M, p1/M, z19.b, z7.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0860400  // mova { z0.s-z3.s }, za0h.s[x12]\n"
+      ".inst 0xc0840680  // mova za0h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xa041c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa042c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840683  // mova za3h.s[x12], { z20.s-z23.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c1a0  // st1w { z0.s-z3.s }, pn8.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c1a8  // st1w { z8.s-z11.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c1bc  // st1w { z28.s-z31.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1b0  // st1w { z16.s-z19.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 29f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860408  // mova { z8.s-z11.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xa060c1a8  // st1w { z8.s-z11.s }, pn8.b, [x13]\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xa061c1a4  // st1w { z4.s-z7.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1a0  // st1w { z0.s-z3.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 29f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "sub x24, x11, x10\n"
+      "cntw x23\n"
+      "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+      "cmp x24, x23\n"
+      "csel x21, x24, x23, LT\n"
+      "add x25, x25, x9, LSL #2\n"  // C += n
+      "lsr x20, x21, #0x2\n"
+      "madd x25, x10, x22, x25\n"  // C += m * ldc
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 16f\n"
+      "15:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      "st1w { z28.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "st1w { z29.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z30.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z31.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "blt 15b\n"
+      "16:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 17f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860408  // mova { z8.s-z11.s }, za0h.s[x12]\n"
+      "st1w { z8.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 17f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z9.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 17f\n"
+      "st1w { z10.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x23\n"
+      "csel x21, x24, x23, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 19f\n"
+      "18:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860420  // mova { z0.s-z3.s }, za1h.s[x12]\n"
+      "st1w { z0.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "st1w { z1.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z2.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z3.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "blt 18b\n"
+      "19:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 20f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 20f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 20f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "20:"  // Store to output array: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x23\n"
+      "csel x21, x24, x23, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 22f\n"
+      "21:"  // Store to output array: Accumulator row 2 loop
+      ".inst 0xc0860450  // mova { z16.s-z19.s }, za2h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z19.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "blt 21b\n"
+      "22:"  // Store to output array: Accumulator row 2 oddments
+      "cbz x19, 23f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860440  // mova { z0.s-z3.s }, za2h.s[x12]\n"
+      "st1w { z0.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 23f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z1.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 23f\n"
+      "st1w { z2.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "23:"  // Store to output array: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 27f\n"
+      "cmp x24, x23\n"
+      "csel x19, x24, x23, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 25f\n"
+      "24:"  // Store to output array: Accumulator row 3 loop
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      "st1w { z12.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "st1w { z13.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "add x12, x12, #0x4\n"
+      "st1w { z14.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "cmp x12, x20, LSL #2\n"
+      "st1w { z15.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "blt 24b\n"
+      "25:"  // Store to output array: Accumulator row 3 oddments
+      "cbz x19, 26f\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      "st1w { z16.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 26f\n"
+      "subs x19, x19, #0x1\n"
+      "st1w { z17.s }, p0, [x25]\n"
+      "add x25, x25, x22\n"
+      "beq 26f\n"
+      "st1w { z18.s }, p0, [x25]\n"
+      "26:"  // Store to output array: Accumulator row 3 oddments: End
+      "27:"  // Store to output array: End
+      "tbz x15, #0, 29f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "28:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c1cc  // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840702  // mova za2h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa043c1c8  // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840503  // mova za3h.s[x12], { z8.s-z11.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 28b\n"
+      "29:"  // End block
+      "incw x9\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #4\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000..c7bd38d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_u8q_mopa_1VLx4VL
+{
+public:
+  typedef uint8_t operand_type;
+  typedef uint8_t result_type;
+
+  typedef void (*kern_type)(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<uint32_t>() * 1;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<uint32_t>() * 4;
+  }
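+  // Note on naming: "1VLx4VL" describes the output tile computed per iteration,
+  // i.e. one vector-length of rows (out_height) by four vector-lengths of 32-bit
+  // columns (out_width), which corresponds to the four ZA tiles used as accumulators.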
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return false;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_u8q_mopa_1VLx4VL;
+
+  StdTransformsSME<operand_type, result_type, 1, 4, 4, true> transforms = {};
+
+  cls_sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000..100f15c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include <limits>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const uint8_t *const A,
+      const uint8_t *const B,
+      uint8_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+      const Requantize32 &rq,
+      const int n_0,
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(uint8_t)),
+        C(C), ldcb(ldc * sizeof(uint8_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias), n_0(n_0),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (rq.per_channel_requant)
+      {
+        flags |= 1 << 2;  // PER_CHANNEL_QUANTISATION
+      }
+    }
+
+    const uint8_t *const A;
+    const uint8_t *const B;
+    const long kstride_bytes;
+    uint8_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    int32_t min = std::numeric_limits<uint8_t>::min();
+    int32_t max = std::numeric_limits<uint8_t>::max();
+
+    const int32_t *const bias;
+    const int n_0;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
+
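+  // Rough map of the numbered labels in the assembly below: labels 1-2 refill
+  // the ZA accumulators from the partial-result buffer when the accumulate flag
+  // is set; 3-5 set up each output tile, zeroing ZA and adding the bias with
+  // ADDHA on the first pass; 6-10 run the K loop, accumulating u8 outer products
+  // into ZA with UMOPA and folding in a per-row term appended to the A data via
+  // ADDVA; 11-13 spill ZA back to the accumulator buffer when no output pointer
+  // was supplied; 14-18 requantize (SQDMULH, SRSHL, offset add, SCLAMP), narrow
+  // with UZP1 and store to C; 19-21 refill ZA and advance to the next tile.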
+  __asm__ __volatile__(
+      "ldr x13, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x13, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c56c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x11]\n"
+      ".inst 0xc0840580  // mova za0h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa041c57c  // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+      ".inst 0xc0840781  // mova za1h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa042c57c  // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+      ".inst 0xc0840782  // mova za2h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa043c564  // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x11, x11, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w9, [%x[args], %[offsetof_M]]\n"
+      "mov x28, #0x0\n"
+      "mov x27, #0x0\n"
+      "ldr w26, [%x[args], %[offsetof_N]]\n"
+      "ldr x25, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x24, x25\n"
+      ".inst 0x25ba6770  // whilelt pn8.s, x27, x26, VLx4\n"
+      "tbnz x13, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      ".inst 0xa01bc279  // ldnt1w { z24.s-z27.s }, p8/Z, [x19, x27, LSL #2]\n"
+      ".inst 0xc0902700  // addha za0.s, p1/M, p1/M, z24.s\n"
+      ".inst 0xc0902721  // addha za1.s, p1/M, p1/M, z25.s\n"
+      ".inst 0xc0902742  // addha za2.s, p1/M, p1/M, z26.s\n"
+      ".inst 0xc0902763  // addha za3.s, p1/M, p1/M, z27.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x27\n"
+      "mov x20, x28\n"
+      "incw x19, ALL, MUL #4\n"
+      "incw x20\n"
+      "cmp x19, x26\n"
+      "csel x20, x28, x20, LT\n"
+      "mov x19, x13\n"
+      "bfm x13, XZR, #0x0, #0x0  // bfc x13, #0x0, #0x1\n"
+      "cmp x20, x9\n"
+      "csel x13, x19, x13, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x27, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      "ld1b { z10.b }, p1/Z, [x24]\n"
+      ".inst 0xa04086dd  // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+      "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
+      ".inst 0xa04186cd  // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0xa04286d9  // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0xa04386c1  // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa1bc2540  // umopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1bd2541  // umopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
+      ".inst 0xa1be2542  // umopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
+      ".inst 0xa1bf2543  // umopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
+      "ld1b { z10.b }, p1/Z, [x24]\n"
+      ".inst 0xa1ac2600  // umopa za0.s, p1/M, p1/M, z16.b, z12.b\n"
+      ".inst 0xa04086dd  // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa1ad2601  // umopa za1.s, p1/M, p1/M, z16.b, z13.b\n"
+      ".inst 0xa1ae2602  // umopa za2.s, p1/M, p1/M, z16.b, z14.b\n"
+      ".inst 0xa1af2603  // umopa za3.s, p1/M, p1/M, z16.b, z15.b\n"
+      "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
+      ".inst 0xa1b826a0  // umopa za0.s, p1/M, p1/M, z21.b, z24.b\n"
+      ".inst 0xa04186cd  // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa1b926a1  // umopa za1.s, p1/M, p1/M, z21.b, z25.b\n"
+      ".inst 0xa1ba26a2  // umopa za2.s, p1/M, p1/M, z21.b, z26.b\n"
+      ".inst 0xa1bb26a3  // umopa za3.s, p1/M, p1/M, z21.b, z27.b\n"
+      "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0xa04286d9  // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+      ".inst 0xa1a02660  // umopa za0.s, p1/M, p1/M, z19.b, z0.b\n"
+      ".inst 0xa1a12661  // umopa za1.s, p1/M, p1/M, z19.b, z1.b\n"
+      ".inst 0xa1a22662  // umopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
+      ".inst 0xa1a32663  // umopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
+      "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0xa04386c1  // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+      "addvl x22, x22, #16\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa1bc2540  // umopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
+      ".inst 0xa1bd2541  // umopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
+      ".inst 0xa1be2542  // umopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
+      ".inst 0xa1bf2543  // umopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
+      ".inst 0xa1ac2600  // umopa za0.s, p1/M, p1/M, z16.b, z12.b\n"
+      ".inst 0xa1ad2601  // umopa za1.s, p1/M, p1/M, z16.b, z13.b\n"
+      ".inst 0xa1ae2602  // umopa za2.s, p1/M, p1/M, z16.b, z14.b\n"
+      ".inst 0xa1af2603  // umopa za3.s, p1/M, p1/M, z16.b, z15.b\n"
+      ".inst 0xa1b826a0  // umopa za0.s, p1/M, p1/M, z21.b, z24.b\n"
+      ".inst 0xa1b926a1  // umopa za1.s, p1/M, p1/M, z21.b, z25.b\n"
+      ".inst 0xa1ba26a2  // umopa za2.s, p1/M, p1/M, z21.b, z26.b\n"
+      ".inst 0xa1bb26a3  // umopa za3.s, p1/M, p1/M, z21.b, z27.b\n"
+      ".inst 0xa1a02660  // umopa za0.s, p1/M, p1/M, z19.b, z0.b\n"
+      ".inst 0xa1a12661  // umopa za1.s, p1/M, p1/M, z19.b, z1.b\n"
+      ".inst 0xa1a22662  // umopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
+      ".inst 0xa1a32663  // umopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      "ld1b { z10.b }, p1/Z, [x24]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x24, x24, #1\n"
+      ".inst 0xa04086dc  // ld1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #4\n"
+      ".inst 0xa1bc2540  // umopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
+      ".inst 0xa1bd2541  // umopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
+      ".inst 0xa1be2542  // umopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
+      ".inst 0xa1bf2543  // umopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      "ld1w { z14.s }, p1/Z, [x24]\n"
+      "addvl x24, x24, #1\n"
+      ".inst 0xc09125c0  // addva za0.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c1  // addva za1.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c2  // addva za2.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c3  // addva za3.s, p1/M, p1/M, z14.s\n"
+      "tbz x13, #1, 14f\n"
+      "tbz x13, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c578  // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      ".inst 0xc0840700  // mova za0h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xa041c570  // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xc0860458  // mova { z24.s-z27.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa042c564  // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c564  // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c55c  // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
+      "addvl x11, x11, #16\n"
+      ".inst 0xa061c548  // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+      ".inst 0xa062c558  // st1w { z24.s-z27.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+      ".inst 0xa063c54c  // st1w { z12.s-z15.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+      "addvl x10, x10, #16\n"
+      "blt 11b\n"
+      "b 21f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      ".inst 0xc0860420  // mova { z0.s-z3.s }, za1h.s[x12]\n"
+      ".inst 0xa060c55c  // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa061c540  // st1w { z0.s-z3.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c548  // st1w { z8.s-z11.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+      ".inst 0xa063c550  // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+      "addvl x10, x10, #16\n"
+      "blt 13b\n"
+      "b 21f\n"
+      "14:"  // Store to output array
+      "ldr x23, [%x[args], %[offsetof_C]]\n"
+      "add x23, x23, x27\n"  // C += n
+      "sub x22, x9, x28\n"
+      "ld1rw { z12.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ldr x21, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x23, x28, x21, x23\n"  // C += m * ldc
+      "ld1rw { z13.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+      "ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+      "ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+      "tbz x13, #2, 15f\n"
+      "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+      "add x20, x20, x27\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa040c26c  // ld1w { z12.s-z15.s }, p8/Z, [x19]\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa040c264  // ld1w { z4.s-z7.s }, p8/Z, [x19]\n"
+      "15:"  // Store to output array: Load per-channel parameters: End
+      "cntw x19\n"
+      "whilelt p0.b, x27, x26\n"
+      "cmp x22, x19\n"
+      "csel x19, x22, x19, LT\n"
+      "lsr x20, x19, #0x1\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x1\n"
+      "cbz x20, 17f\n"
+      "16:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086001a  // mova { z26.s-z27.s }, za0h.s[x12, 0:1]\n"
+      ".inst 0xc086005c  // mova { z28.s-z29.s }, za1h.s[x12, 0:1]\n"
+      ".inst 0xc1aca41a  // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z12.s\n"
+      ".inst 0xc0860096  // mova { z22.s-z23.s }, za2h.s[x12, 0:1]\n"
+      ".inst 0xc08600d0  // mova { z16.s-z17.s }, za3h.s[x12, 0:1]\n"
+      ".inst 0xc1ada41c  // sqdmulh { z28.s-z29.s }, { z28.s-z29.s }, z13.s\n"
+      ".inst 0xc1aea416  // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z14.s\n"
+      "add x12, x12, #0x2\n"
+      "cmp x12, x20, LSL #1\n"
+      ".inst 0xc1afa410  // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z15.s\n"
+      ".inst 0xc1a4a23a  // srshl { z26.s-z27.s }, { z26.s-z27.s }, z4.s\n"
+      ".inst 0xc1a5a23c  // srshl { z28.s-z29.s }, { z28.s-z29.s }, z5.s\n"
+      ".inst 0xc1a6a236  // srshl { z22.s-z23.s }, { z22.s-z23.s }, z6.s\n"
+      ".inst 0xc1a7a230  // srshl { z16.s-z17.s }, { z16.s-z17.s }, z7.s\n"
+      ".inst 0xc1a1a31a  // add { z26.s-z27.s }, { z26.s-z27.s }, z1.s\n"
+      ".inst 0xc1a1a31c  // add { z28.s-z29.s }, { z28.s-z29.s }, z1.s\n"
+      ".inst 0xc1a1a316  // add { z22.s-z23.s }, { z22.s-z23.s }, z1.s\n"
+      ".inst 0xc1a1a310  // add { z16.s-z17.s }, { z16.s-z17.s }, z1.s\n"
+      ".inst 0xc1b4c6ba  // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6bc  // sclamp { z28.s-z29.s }, z21.s, z20.s\n"
+      "uzp1 z19.b, z26.b, z28.b\n"
+      ".inst 0xc1b4c6b6  // sclamp { z22.s-z23.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6b0  // sclamp { z16.s-z17.s }, z21.s, z20.s\n"
+      "uzp1 z16.b, z22.b, z16.b\n"
+      "uzp1 z18.b, z27.b, z29.b\n"
+      "uzp1 z17.b, z23.b, z17.b\n"
+      "uzp1 z16.b, z19.b, z16.b\n"
+      "st1b { z16.b }, p0, [x23]\n"
+      "add x23, x23, x21\n"
+      "uzp1 z16.b, z18.b, z17.b\n"
+      "st1b { z16.b }, p0, [x23]\n"
+      "add x23, x23, x21\n"
+      "blt 16b\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 18f\n"
+      ".inst 0xc0860002  // mova { z2.s-z3.s }, za0h.s[x12, 0:1]\n"
+      ".inst 0xc0860058  // mova { z24.s-z25.s }, za1h.s[x12, 0:1]\n"
+      ".inst 0xc1aca402  // sqdmulh { z2.s-z3.s }, { z2.s-z3.s }, z12.s\n"
+      ".inst 0xc0860090  // mova { z16.s-z17.s }, za2h.s[x12, 0:1]\n"
+      ".inst 0xc08600ca  // mova { z10.s-z11.s }, za3h.s[x12, 0:1]\n"
+      ".inst 0xc1ada418  // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z13.s\n"
+      ".inst 0xc1aea410  // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z14.s\n"
+      ".inst 0xc1afa40a  // sqdmulh { z10.s-z11.s }, { z10.s-z11.s }, z15.s\n"
+      ".inst 0xc1a4a222  // srshl { z2.s-z3.s }, { z2.s-z3.s }, z4.s\n"
+      ".inst 0xc1a5a238  // srshl { z24.s-z25.s }, { z24.s-z25.s }, z5.s\n"
+      ".inst 0xc1a6a230  // srshl { z16.s-z17.s }, { z16.s-z17.s }, z6.s\n"
+      ".inst 0xc1a7a22a  // srshl { z10.s-z11.s }, { z10.s-z11.s }, z7.s\n"
+      ".inst 0xc1a1a302  // add { z2.s-z3.s }, { z2.s-z3.s }, z1.s\n"
+      ".inst 0xc1a1a318  // add { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+      ".inst 0xc1a1a310  // add { z16.s-z17.s }, { z16.s-z17.s }, z1.s\n"
+      ".inst 0xc1a1a30a  // add { z10.s-z11.s }, { z10.s-z11.s }, z1.s\n"
+      ".inst 0xc1b4c6a2  // sclamp { z2.s-z3.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6b8  // sclamp { z24.s-z25.s }, z21.s, z20.s\n"
+      "uzp1 z23.b, z2.b, z24.b\n"
+      ".inst 0xc1b4c6b0  // sclamp { z16.s-z17.s }, z21.s, z20.s\n"
+      ".inst 0xc1b4c6aa  // sclamp { z10.s-z11.s }, z21.s, z20.s\n"
+      "uzp1 z16.b, z16.b, z10.b\n"
+      "uzp1 z16.b, z23.b, z16.b\n"
+      "st1b { z16.b }, p0, [x23]\n"
+      "18:"  // Store to output array: Accumulator row 0 oddments: End
+      "19:"  // Store to output array: End
+      "tbz x13, #0, 21f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "20:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c570  // ld1w { z16.s-z19.s }, pn9.b/Z, [x11]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c56c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c570  // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c56c  // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x11, x11, #16\n"
+      "blt 20b\n"
+      "21:"  // End block
+      "incw x27, ALL, MUL #4\n"
+      "cmp x27, x26\n"
+      "blt 3b\n"
+      "incw x28\n"
+      "cmp x28, x9\n"
+      "mov x27, #0x0\n"
+      "mov x25, x24\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000..123405b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_u8q_mopa_2VLx2VL
+{
+public:
+  typedef uint8_t operand_type;
+  typedef uint8_t result_type;
+
+  typedef void (*kern_type)(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<uint32_t>() * 2;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<uint32_t>() * 2;
+  }
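+  // Note on naming: "2VLx2VL" describes the output tile computed per iteration,
+  // i.e. two vector-lengths of rows by two vector-lengths of 32-bit columns,
+  // covering the four ZA accumulator tiles in a 2x2 arrangement.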
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return false;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_u8q_mopa_2VLx2VL;
+
+  StdTransformsSME<operand_type, result_type, 2, 2, 4, true> transforms = {};
+
+  cls_sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000..6c42012
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include <limits>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const uint8_t *const A,
+      const uint8_t *const B,
+      uint8_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+      const Requantize32 &rq,
+      const int n_0,
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(uint8_t)),
+        C(C), ldcb(ldc * sizeof(uint8_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias), n_0(n_0),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (rq.per_channel_requant)
+      {
+        flags |= 1 << 2;  // PER_CHANNEL_QUANTISATION
+      }
+    }
+
+    const uint8_t *const A;
+    const uint8_t *const B;
+    const long kstride_bytes;
+    uint8_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    int32_t min = std::numeric_limits<uint8_t>::min();
+    int32_t max = std::numeric_limits<uint8_t>::max();
+
+    const int32_t *const bias;
+    const int n_0;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
+
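+  // The assembly below follows the same label layout as the 1VLx4VL variant;
+  // the main difference is the 2x2 ZA arrangement (za0/za1 hold the first
+  // vector-length of output rows, za2/za3 the second), so the store path merges
+  // the two column blocks of each row with UZP1 before the byte stores.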
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207811  // ptrue pn9.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840400  // mova za0h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa041c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c5c0  // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840402  // mova za2h.s[x12], { z0.s-z3.s }\n"
+      ".inst 0xa043c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840783  // mova za3h.s[x12], { z28.s-z31.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      ".inst 0x25bc4530  // whilelt pn8.s, x9, x28, VLx2\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      ".inst 0xa0094275  // ldnt1w { z20.s-z21.s }, p8/Z, [x19, x9, LSL #2]\n"
+      ".inst 0xc0902680  // addha za0.s, p1/M, p1/M, z20.s\n"
+      ".inst 0xc09026a1  // addha za1.s, p1/M, p1/M, z21.s\n"
+      ".inst 0xc0902682  // addha za2.s, p1/M, p1/M, z20.s\n"
+      ".inst 0xc09026a3  // addha za3.s, p1/M, p1/M, z21.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19, ALL, MUL #2\n"
+      "incw x20, ALL, MUL #2\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa040075e  // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+      ".inst 0xa04006d1  // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa041074e  // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa04106c9  // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa0420740  // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa14206dc  // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa0430744  // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14306ca  // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa1b027c0  // umopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1b127c1  // umopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
+      ".inst 0xa1b027e2  // umopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
+      ".inst 0xa1b127e3  // umopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
+      ".inst 0xa040075e  // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+      ".inst 0xa1a825c0  // umopa za0.s, p1/M, p1/M, z14.b, z8.b\n"
+      ".inst 0xa04006d1  // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+      ".inst 0xa1a925c1  // umopa za1.s, p1/M, p1/M, z14.b, z9.b\n"
+      ".inst 0xa1a825e2  // umopa za2.s, p1/M, p1/M, z15.b, z8.b\n"
+      ".inst 0xa1a925e3  // umopa za3.s, p1/M, p1/M, z15.b, z9.b\n"
+      ".inst 0xa041074e  // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+      ".inst 0xa1b42400  // umopa za0.s, p1/M, p1/M, z0.b, z20.b\n"
+      ".inst 0xa04106c9  // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+      ".inst 0xa1bc2401  // umopa za1.s, p1/M, p1/M, z0.b, z28.b\n"
+      ".inst 0xa1b42422  // umopa za2.s, p1/M, p1/M, z1.b, z20.b\n"
+      ".inst 0xa1bc2423  // umopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+      ".inst 0xa0420740  // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa14206dc  // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+      ".inst 0xa1a22480  // umopa za0.s, p1/M, p1/M, z4.b, z2.b\n"
+      ".inst 0xa1aa2481  // umopa za1.s, p1/M, p1/M, z4.b, z10.b\n"
+      ".inst 0xa1a224a2  // umopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
+      ".inst 0xa1aa24a3  // umopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
+      ".inst 0xa0430744  // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+      "addvl x26, x26, #8\n"
+      ".inst 0xa14306ca  // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+      "addvl x22, x22, #8\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa1b027c0  // umopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
+      ".inst 0xa1b127c1  // umopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
+      ".inst 0xa1b027e2  // umopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
+      ".inst 0xa1b127e3  // umopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
+      ".inst 0xa1a825c0  // umopa za0.s, p1/M, p1/M, z14.b, z8.b\n"
+      ".inst 0xa1a925c1  // umopa za1.s, p1/M, p1/M, z14.b, z9.b\n"
+      ".inst 0xa1a825e2  // umopa za2.s, p1/M, p1/M, z15.b, z8.b\n"
+      ".inst 0xa1a925e3  // umopa za3.s, p1/M, p1/M, z15.b, z9.b\n"
+      ".inst 0xa1b42400  // umopa za0.s, p1/M, p1/M, z0.b, z20.b\n"
+      ".inst 0xa1bc2401  // umopa za1.s, p1/M, p1/M, z0.b, z28.b\n"
+      ".inst 0xa1b42422  // umopa za2.s, p1/M, p1/M, z1.b, z20.b\n"
+      ".inst 0xa1bc2423  // umopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+      ".inst 0xa1a22480  // umopa za0.s, p1/M, p1/M, z4.b, z2.b\n"
+      ".inst 0xa1aa2481  // umopa za1.s, p1/M, p1/M, z4.b, z10.b\n"
+      ".inst 0xa1a224a2  // umopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
+      ".inst 0xa1aa24a3  // umopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa040075e  // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xa04006d0  // ld1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+      "addvl x22, x22, #2\n"
+      ".inst 0xa1b027c0  // umopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
+      ".inst 0xa1b127c1  // umopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
+      ".inst 0xa1b027e2  // umopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
+      ".inst 0xa1b127e3  // umopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      ".inst 0xa040474e  // ld1w { z14.s-z15.s }, pn9.b/Z, [x26]\n"
+      "addvl x26, x26, #2\n"
+      ".inst 0xc09125c0  // addva za0.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125c1  // addva za1.s, p1/M, p1/M, z14.s\n"
+      ".inst 0xc09125e2  // addva za2.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09125e3  // addva za3.s, p1/M, p1/M, z15.s\n"
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c5dc  // ld1w { z28.s-z31.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0860408  // mova { z8.s-z11.s }, za0h.s[x12]\n"
+      ".inst 0xc0840780  // mova za0h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xc0860434  // mova { z20.s-z23.s }, za1h.s[x12]\n"
+      ".inst 0xa041c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840701  // mova za1h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xc086045c  // mova { z28.s-z31.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xa042c5d8  // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840702  // mova za2h.s[x12], { z24.s-z27.s }\n"
+      ".inst 0xa043c5cc  // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840583  // mova za3h.s[x12], { z12.s-z15.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c5b4  // st1w { z20.s-z23.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c5bc  // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 24f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0860424  // mova { z4.s-z7.s }, za1h.s[x12]\n"
+      ".inst 0xa060c5b0  // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
+      ".inst 0xc0860448  // mova { z8.s-z11.s }, za2h.s[x12]\n"
+      ".inst 0xc086046c  // mova { z12.s-z15.s }, za3h.s[x12]\n"
+      ".inst 0xa061c5a4  // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c5a8  // st1w { z8.s-z11.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c5ac  // st1w { z12.s-z15.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 24f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z11.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+      "ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+      "ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+      "tbz x15, #2, 15f\n"
+      "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+      "add x20, x20, x9\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa0404262  // ld1w { z2.s-z3.s }, p8/Z, [x19]\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      ".inst 0xa0404260  // ld1w { z0.s-z1.s }, p8/Z, [x19]\n"
+      "15:"  // Store to output array: Load per-channel parameters: End
+      "cntw x22\n"
+      "whilelt p0.h, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 17f\n"
+      "16:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc086043c  // mova { z28.s-z31.s }, za1h.s[x12]\n"
+      ".inst 0xc1a2ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
+      ".inst 0xc1a3ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a0aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+      ".inst 0xc1a1aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
+      ".inst 0xc1abab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
+      ".inst 0xc1abab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+      ".inst 0xc1b8cf2c  // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf3c  // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z12.h, z28.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z13.h, z29.h\n"
+      "uzp1 z17.h, z14.h, z30.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z15.h, z31.h\n"
+      "st1b { z17.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 16b\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 18f\n"
+      ".inst 0xc086041c  // mova { z28.s-z31.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xc1a2ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
+      ".inst 0xc1a3ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a0aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
+      ".inst 0xc1a1aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+      ".inst 0xc1abab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+      ".inst 0xc1abab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
+      ".inst 0xc1b8cf3c  // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf2c  // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z28.h, z12.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "subs x19, x19, #0x1\n"
+      "uzp1 z16.h, z29.h, z13.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "uzp1 z16.h, z30.h, z14.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "18:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 22f\n"
+      "whilelt p0.h, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 20f\n"
+      "19:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860444  // mova { z4.s-z7.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xc1a2ac04  // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
+      ".inst 0xc1a3ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a0aa24  // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+      ".inst 0xc1a1aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+      ".inst 0xc1abab04  // add { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+      ".inst 0xc1abab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+      ".inst 0xc1b8cf24  // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf30  // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z4.h, z16.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z5.h, z17.h\n"
+      "uzp1 z17.h, z6.h, z18.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "uzp1 z16.h, z7.h, z19.h\n"
+      "st1b { z17.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 19b\n"
+      "20:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 21f\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc0860470  // mova { z16.s-z19.s }, za3h.s[x12]\n"
+      ".inst 0xc1a2ac14  // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
+      ".inst 0xc1a3ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a0aa34  // srshl { z20.s-z23.s }, { z20.s-z23.s }, z0.s\n"
+      ".inst 0xc1a1aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+      ".inst 0xc1abab14  // add { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+      ".inst 0xc1abab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+      ".inst 0xc1b8cf34  // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
+      ".inst 0xc1b8cf30  // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+      "uzp1 z16.h, z20.h, z16.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "subs x19, x19, #0x1\n"
+      "uzp1 z16.h, z21.h, z17.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "uzp1 z16.h, z22.h, z18.h\n"
+      "st1b { z16.h }, p0, [x25]\n"
+      "21:"  // Store to output array: Accumulator row 1 oddments: End
+      "22:"  // Store to output array: End
+      "tbz x15, #0, 24f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "23:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+      ".inst 0xc0840600  // mova za0h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa041c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c5d0  // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c5c4  // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 23b\n"
+      "24:"  // End block
+      "incw x9, ALL, MUL #2\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #2\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000..2e61cf4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_u8q_mopa_4VLx1VL
+{
+public:
+  typedef uint8_t operand_type;
+  typedef uint8_t result_type;
+
+  typedef void (*kern_type)(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
+
+  /* Kernel blocking parameters */
+  static unsigned int out_height()
+  {
+    return sme::get_vector_length<uint32_t>() * 4;
+  }
+
+  static unsigned int out_width()
+  {
+    return sme::get_vector_length<uint32_t>() * 1;
+  }
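+  // Note on naming: "4VLx1VL" describes the output tile computed per iteration,
+  // i.e. four vector-lengths of rows by one vector-length of 32-bit columns,
+  // with the four ZA accumulator tiles stacked vertically.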
+
+  static constexpr unsigned int k_unroll()
+  {
+    return 4;
+  }
+
+  static constexpr bool supports_accumulate()
+  {
+    return false;
+  }
+
+  static constexpr bool supports_bias()
+  {
+    return true;
+  }
+
+  static constexpr bool supports_activation()
+  {
+    return false;
+  }
+
+  static constexpr bool is_sme()
+  {
+    return true;
+  }
+
+  // Default to the generic kernel
+  kern_type kernel = sme2_interleaved_nomerge_u8q_mopa_4VLx1VL;
+
+  StdTransformsSME<operand_type, result_type, 4, 1, 4, true> transforms = {};
+
+  cls_sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const CPUInfo *ci)
+  {
+    ARM_COMPUTE_UNUSED(ci);
+  }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
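
Note: the class above only publishes blocking hooks; the actual tile shape follows from the streaming vector length at run time. A minimal sketch of that relationship, assuming only that the streaming vector length in bytes is known (the helper name below is illustrative and not part of this patch):

    #include <cstdint>
    #include <cstdio>

    // Sketch: the M x N footprint of one sme2_interleaved_nomerge_u8q_mopa_4VLx1VL
    // call, derived from the streaming vector length in bytes (what
    // raw_vector_length() in misc-sve.cpp reports).
    static void print_u8q_mopa_4VLx1VL_tile(unsigned long svl_bytes)
    {
        const unsigned int vl_s   = svl_bytes / sizeof(uint32_t); // 32-bit lanes per Z vector
        const unsigned int tile_m = vl_s * 4;                     // out_height(): four VLs of rows
        const unsigned int tile_n = vl_s * 1;                     // out_width(): one VL of columns
        const unsigned int k_blk  = 4;                            // k_unroll(): K handled in groups of four u8 values
        std::printf("output tile %u x %u, K rounded up to a multiple of %u\n", tile_m, tile_n, k_blk);
    }
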
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000..40d2fff
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const uint8_t *const A,
+      const uint8_t *const B,
+      uint8_t *const C, const int ldc,
+      const int M, const int N, const int K,
+      const int32_t *const bias,
+      const Requantize32 &rq,
+      const int n_0,
+      bool accumulate,
+      int32_t *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(roundup(K, 4) * sizeof(uint8_t)),
+        C(C), ldcb(ldc * sizeof(uint8_t)),
+        M(M), N(N), K(K),
+        n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+
+        bias(bias), n_0(n_0),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (rq.per_channel_requant)
+      {
+        flags |= 1 << 2;  // PER_CHANNEL_QUANTISATION
+      }
+    }
+
+    const uint8_t *const A;
+    const uint8_t *const B;
+    const long kstride_bytes;
+    uint8_t *const C;
+    const long ldcb;
+    const long M, N, K, n_loops, n_tail_iters;
+    int32_t min = std::numeric_limits<uint8_t>::min();
+    int32_t max = std::numeric_limits<uint8_t>::max();
+
+    const int32_t *const bias;
+    const int n_0;
+
+    int32_t *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+      "ldr x15, [%x[args], %[offsetof_flags]]\n"
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      ".inst 0x25207810  // ptrue pn8.b\n"
+      "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+      "tbz x15, #0, 2f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "1:"  // Initial accumulator load from buffer: Loop
+      ".inst 0xa040c1dc  // ld1w { z28.s-z31.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840780  // mova za0h.s[x12], { z28.s-z31.s }\n"
+      ".inst 0xa041c1cc  // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840581  // mova za1h.s[x12], { z12.s-z15.s }\n"
+      ".inst 0xa042c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840682  // mova za2h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xa043c1d8  // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840703  // mova za3h.s[x12], { z24.s-z27.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 1b\n"
+      "2:"  // Initial accumulator load from buffer: End
+      "ldr w11, [%x[args], %[offsetof_M]]\n"
+      "mov x10, #0x0\n"
+      "mov x9, #0x0\n"
+      "ldr w28, [%x[args], %[offsetof_N]]\n"
+      "ldr x27, [%x[args], %[offsetof_A]]\n"
+      "3:"  // M and N loop
+      "mov x26, x27\n"
+      "whilelt p0.s, x9, x28\n"
+      "tbnz x15, #0, 4f\n"
+      "ldr x19, [%x[args], %[offsetof_bias]]\n"
+      ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+      "cbz x19, 5f\n"
+      "ldnt1w { z15.s }, p0/Z, [x19, x9, LSL #2]\n"
+      ".inst 0xc09025e0  // addha za0.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e1  // addha za1.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e2  // addha za2.s, p1/M, p1/M, z15.s\n"
+      ".inst 0xc09025e3  // addha za3.s, p1/M, p1/M, z15.s\n"
+      "4:"  // Prepare accumulators: Test for last block
+      "mov x19, x9\n"
+      "mov x20, x10\n"
+      "incw x19\n"
+      "incw x20, ALL, MUL #4\n"
+      "cmp x19, x28\n"
+      "csel x20, x10, x20, LT\n"
+      "mov x19, x15\n"
+      "bfm x15, XZR, #0x0, #0x0  // bfc x15, #0x0, #0x1\n"
+      "cmp x20, x11\n"
+      "csel x15, x19, x15, LT\n"
+      "5:"  // Prepare accumulators: End
+      "ldr x19, [%x[args], %[offsetof_K]]\n"
+      "add x19, x19, #0x3\n"
+      "lsr x19, x19, #0x2\n"
+      "ldr x22, [%x[args], %[offsetof_B]]\n"
+      "lsr x21, x19, #0x2\n"
+      "and x20, x19, #0x3\n"
+      "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+      "madd x22, x9, x19, x22\n"  // bptr = B + n * kstride_bytes
+      "cbz x21, 8f\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1408352  // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+      "ldnt1b { z0.b }, p1/Z, [x22]\n"
+      ".inst 0xa1418353  // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa1428350  // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa1438342  // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "ble 7f\n"
+      "6:"  // K loop
+      ".inst 0xa1a02640  // umopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
+      "subs x21, x21, #0x1\n"
+      ".inst 0xa1a026c1  // umopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
+      ".inst 0xa1a02742  // umopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
+      ".inst 0xa1a027c3  // umopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
+      ".inst 0xa1408352  // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+      ".inst 0xa1a92660  // umopa za0.s, p1/M, p1/M, z19.b, z9.b\n"
+      "ldnt1b { z0.b }, p1/Z, [x22]\n"
+      ".inst 0xa1a926e1  // umopa za1.s, p1/M, p1/M, z23.b, z9.b\n"
+      ".inst 0xa1a92762  // umopa za2.s, p1/M, p1/M, z27.b, z9.b\n"
+      ".inst 0xa1a927e3  // umopa za3.s, p1/M, p1/M, z31.b, z9.b\n"
+      ".inst 0xa1418353  // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+      ".inst 0xa1b52600  // umopa za0.s, p1/M, p1/M, z16.b, z21.b\n"
+      "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
+      ".inst 0xa1b52681  // umopa za1.s, p1/M, p1/M, z20.b, z21.b\n"
+      ".inst 0xa1b52702  // umopa za2.s, p1/M, p1/M, z24.b, z21.b\n"
+      ".inst 0xa1b52783  // umopa za3.s, p1/M, p1/M, z28.b, z21.b\n"
+      ".inst 0xa1428350  // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+      "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0xa1ac2440  // umopa za0.s, p1/M, p1/M, z2.b, z12.b\n"
+      ".inst 0xa1ac24c1  // umopa za1.s, p1/M, p1/M, z6.b, z12.b\n"
+      ".inst 0xa1ac2542  // umopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
+      ".inst 0xa1ac25c3  // umopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
+      ".inst 0xa1438342  // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+      "addvl x26, x26, #16\n"
+      "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      "bgt 6b\n"
+      "7:"  // K loop tail
+      ".inst 0xa1a02640  // umopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
+      ".inst 0xa1a026c1  // umopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
+      ".inst 0xa1a02742  // umopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
+      ".inst 0xa1a027c3  // umopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
+      ".inst 0xa1a92660  // umopa za0.s, p1/M, p1/M, z19.b, z9.b\n"
+      ".inst 0xa1a926e1  // umopa za1.s, p1/M, p1/M, z23.b, z9.b\n"
+      ".inst 0xa1a92762  // umopa za2.s, p1/M, p1/M, z27.b, z9.b\n"
+      ".inst 0xa1a927e3  // umopa za3.s, p1/M, p1/M, z31.b, z9.b\n"
+      ".inst 0xa1b52600  // umopa za0.s, p1/M, p1/M, z16.b, z21.b\n"
+      ".inst 0xa1b52681  // umopa za1.s, p1/M, p1/M, z20.b, z21.b\n"
+      ".inst 0xa1b52702  // umopa za2.s, p1/M, p1/M, z24.b, z21.b\n"
+      ".inst 0xa1b52783  // umopa za3.s, p1/M, p1/M, z28.b, z21.b\n"
+      ".inst 0xa1ac2440  // umopa za0.s, p1/M, p1/M, z2.b, z12.b\n"
+      ".inst 0xa1ac24c1  // umopa za1.s, p1/M, p1/M, z6.b, z12.b\n"
+      ".inst 0xa1ac2542  // umopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
+      ".inst 0xa1ac25c3  // umopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
+      "8:"  // K oddments
+      "cbz x20, 10f\n"
+      "9:"  // K oddments: Loop
+      ".inst 0xa1408352  // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+      "subs x20, x20, #0x1\n"
+      "addvl x26, x26, #4\n"
+      "ld1b { z0.b }, p1/Z, [x22]\n"
+      "addvl x22, x22, #1\n"
+      ".inst 0xa1a02640  // umopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
+      ".inst 0xa1a026c1  // umopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
+      ".inst 0xa1a02742  // umopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
+      ".inst 0xa1a027c3  // umopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
+      "bgt 9b\n"
+      "10:"  // K oddments: End
+      ".inst 0xa040c340  // ld1w { z0.s-z3.s }, pn8.b/Z, [x26]\n"
+      "addvl x26, x26, #4\n"
+      ".inst 0xc0912400  // addva za0.s, p1/M, p1/M, z0.s\n"
+      ".inst 0xc0912421  // addva za1.s, p1/M, p1/M, z1.s\n"
+      ".inst 0xc0912442  // addva za2.s, p1/M, p1/M, z2.s\n"
+      ".inst 0xc0912463  // addva za3.s, p1/M, p1/M, z3.s\n"
+      "tbz x15, #1, 14f\n"
+      "tbz x15, #0, 12f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "11:"  // Store to partial result buffer: Store and refill: Loop
+      ".inst 0xa040c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc0840680  // mova za0h.s[x12], { z20.s-z23.s }\n"
+      ".inst 0xc0860428  // mova { z8.s-z11.s }, za1h.s[x12]\n"
+      ".inst 0xa041c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840481  // mova za1h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc086047c  // mova { z28.s-z31.s }, za3h.s[x12]\n"
+      ".inst 0xa042c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840482  // mova za2h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa043c1d4  // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840683  // mova za3h.s[x12], { z20.s-z23.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa060c1b0  // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
+      "addvl x14, x14, #16\n"
+      ".inst 0xa061c1a8  // st1w { z8.s-z11.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      ".inst 0xa062c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1bc  // st1w { z28.s-z31.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 11b\n"
+      "b 30f\n"
+      "12:"  // Store to partial result buffer: Store only
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "13:"  // Store to partial result buffer: Store only: Loop
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc086042c  // mova { z12.s-z15.s }, za1h.s[x12]\n"
+      ".inst 0xa060c1b0  // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
+      ".inst 0xc0860454  // mova { z20.s-z23.s }, za2h.s[x12]\n"
+      ".inst 0xc0860478  // mova { z24.s-z27.s }, za3h.s[x12]\n"
+      ".inst 0xa061c1ac  // st1w { z12.s-z15.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      ".inst 0xa062c1b4  // st1w { z20.s-z23.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+      ".inst 0xa063c1b8  // st1w { z24.s-z27.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+      "addvl x13, x13, #16\n"
+      "blt 13b\n"
+      "b 30f\n"
+      "14:"  // Store to output array
+      "ldr x25, [%x[args], %[offsetof_C]]\n"
+      "add x25, x25, x9\n"  // C += n
+      "sub x24, x11, x10\n"
+      "ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+      "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+      "madd x25, x10, x23, x25\n"  // C += m * ldc
+      "ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+      "ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+      "ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+      "ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+      "tbz x15, #2, 15f\n"
+      "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+      "add x20, x20, x9\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      "ld1w { z8.s }, p0/Z, [x19]\n"
+      "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+      "add x19, x19, x20, LSL #2\n"
+      "ld1w { z7.s }, p0/Z, [x19]\n"
+      "15:"  // Store to output array: Load per-channel parameters: End
+      "cntw x22\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 17f\n"
+      "16:"  // Store to output array: Accumulator row 0 loop
+      ".inst 0xc086040c  // mova { z12.s-z15.s }, za0h.s[x12]\n"
+      ".inst 0xc1a8ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a4ccac  // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
+      "st1b { z12.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z13.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z14.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z15.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 16b\n"
+      "17:"  // Store to output array: Accumulator row 0 oddments
+      "cbz x19, 18f\n"
+      ".inst 0xc0860410  // mova { z16.s-z19.s }, za0h.s[x12]\n"
+      ".inst 0xc1a8ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
+      ".inst 0xc1a6ab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+      ".inst 0xc1a4ccb0  // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
+      "st1b { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 18f\n"
+      "st1b { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "18:"  // Store to output array: Accumulator row 0 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 20f\n"
+      "19:"  // Store to output array: Accumulator row 1 loop
+      ".inst 0xc0860430  // mova { z16.s-z19.s }, za1h.s[x12]\n"
+      ".inst 0xc1a8ac10  // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa30  // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab10  // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+      ".inst 0xc1a4ccb0  // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
+      "st1b { z16.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z17.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z18.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z19.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 19b\n"
+      "20:"  // Store to output array: Accumulator row 1 oddments
+      "cbz x19, 21f\n"
+      ".inst 0xc086043c  // mova { z28.s-z31.s }, za1h.s[x12]\n"
+      ".inst 0xc1a8ac1c  // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa3c  // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+      ".inst 0xc1a6ab1c  // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+      ".inst 0xc1a4ccbc  // sclamp { z28.s-z31.s }, z5.s, z4.s\n"
+      "st1b { z28.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z29.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 21f\n"
+      "st1b { z30.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "21:"  // Store to output array: Accumulator row 1 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x21, x24, x22, LT\n"
+      "lsr x20, x21, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x21, #0x3\n"
+      "cbz x20, 23f\n"
+      "22:"  // Store to output array: Accumulator row 2 loop
+      ".inst 0xc0860458  // mova { z24.s-z27.s }, za2h.s[x12]\n"
+      ".inst 0xc1a8ac18  // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa38  // srshl { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab18  // add { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
+      ".inst 0xc1a4ccb8  // sclamp { z24.s-z27.s }, z5.s, z4.s\n"
+      "st1b { z24.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z25.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z26.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z27.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 22b\n"
+      "23:"  // Store to output array: Accumulator row 2 oddments
+      "cbz x19, 24f\n"
+      ".inst 0xc086044c  // mova { z12.s-z15.s }, za2h.s[x12]\n"
+      ".inst 0xc1a8ac0c  // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa2c  // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
+      ".inst 0xc1a6ab0c  // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+      ".inst 0xc1a4ccac  // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
+      "st1b { z12.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z13.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 24f\n"
+      "st1b { z14.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "24:"  // Store to output array: Accumulator row 2 oddments: End
+      "subs x24, x24, x21\n"
+      "beq 28f\n"
+      "whilelt p0.s, x9, x28\n"
+      "cmp x24, x22\n"
+      "csel x19, x24, x22, LT\n"
+      "lsr x20, x19, #0x2\n"
+      "mov x12, #0x0\n"
+      "and x19, x19, #0x3\n"
+      "cbz x20, 26f\n"
+      "25:"  // Store to output array: Accumulator row 3 loop
+      ".inst 0xc0860474  // mova { z20.s-z23.s }, za3h.s[x12]\n"
+      ".inst 0xc1a8ac14  // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+      "add x12, x12, #0x4\n"
+      ".inst 0xc1a7aa34  // srshl { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
+      "cmp x12, x20, LSL #2\n"
+      ".inst 0xc1a6ab14  // add { z20.s-z23.s }, { z20.s-z23.s }, z6.s\n"
+      ".inst 0xc1a4ccb4  // sclamp { z20.s-z23.s }, z5.s, z4.s\n"
+      "st1b { z20.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z21.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z22.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "st1b { z23.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "blt 25b\n"
+      "26:"  // Store to output array: Accumulator row 3 oddments
+      "cbz x19, 27f\n"
+      ".inst 0xc0860460  // mova { z0.s-z3.s }, za3h.s[x12]\n"
+      ".inst 0xc1a8ac00  // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+      "subs x19, x19, #0x1\n"
+      ".inst 0xc1a7aa20  // srshl { z0.s-z3.s }, { z0.s-z3.s }, z7.s\n"
+      ".inst 0xc1a6ab00  // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+      ".inst 0xc1a4cca0  // sclamp { z0.s-z3.s }, z5.s, z4.s\n"
+      "st1b { z0.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      "subs x19, x19, #0x1\n"
+      "st1b { z1.s }, p0, [x25]\n"
+      "add x25, x25, x23\n"
+      "beq 27f\n"
+      "st1b { z2.s }, p0, [x25]\n"
+      "27:"  // Store to output array: Accumulator row 3 oddments: End
+      "28:"  // Store to output array: End
+      "tbz x15, #0, 30f\n"
+      "mov x12, #0x0\n"
+      "cntw x19\n"
+      "29:"  // Store to output array: Refill accumulators: Loop
+      ".inst 0xa040c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+      ".inst 0xc0840480  // mova za0h.s[x12], { z4.s-z7.s }\n"
+      ".inst 0xa041c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+      ".inst 0xc0840601  // mova za1h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa042c1d0  // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+      ".inst 0xc0840602  // mova za2h.s[x12], { z16.s-z19.s }\n"
+      ".inst 0xa043c1c4  // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+      ".inst 0xc0840483  // mova za3h.s[x12], { z4.s-z7.s }\n"
+      "add x12, x12, #0x4\n"
+      "cmp x12, x19\n"
+      "addvl x14, x14, #16\n"
+      "blt 29b\n"
+      "30:"  // End block
+      "incw x9\n"
+      "cmp x9, x28\n"
+      "blt 3b\n"
+      "incw x10, ALL, MUL #4\n"
+      "cmp x10, x11\n"
+      "mov x9, #0x0\n"
+      "mov x27, x26\n"
+      "blt 3b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      :
+      : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+}  // namespace arm_gemm
+
+#endif  // ARM_COMPUTE_ENABLE_SME2
+#endif  // __ARM_FEATURE_SVE
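
Note: the "Store to output array" blocks above requantize each 32-bit accumulator with SQDMULH against the per-layer (or per-channel) multiplier, an SRSHL rounding shift, the output offset, and a clamp before the byte store. A scalar model of that per-element sequence, written purely for readability under the SQDMULH/SRSHL instruction semantics (it is not a routine in this patch):

    #include <algorithm>
    #include <cstdint>

    // Scalar sketch of one element of the requantization path above:
    // SQDMULH by the multiplier, SRSHL by the signed shift, add the output
    // offset, clamp, then narrow to u8.
    static inline uint8_t requantize_u8_ref(int32_t acc, int32_t mul, int32_t shift,
                                            int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // SQDMULH: saturating doubling multiply, keeping the high half.
        int64_t high = ((int64_t)acc * (int64_t)mul) >> 31;
        int32_t v = (int32_t)std::clamp<int64_t>(high, INT32_MIN, INT32_MAX);

        // SRSHL: left shift for a non-negative amount, rounding right shift otherwise.
        if (shift >= 0) {
            v = (int32_t)((int64_t)v << shift);
        } else {
            const int32_t s = -shift;
            v = (int32_t)(((int64_t)v + (1LL << (s - 1))) >> s);
        }

        // Add the output zero point and clamp to the requested range.
        v = std::clamp(v + c_offset, minval, maxval);
        return (uint8_t)v;
    }
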
diff --git a/src/core/NEON/kernels/arm_gemm/misc-sve.cpp b/src/core/NEON/kernels/arm_gemm/misc-sve.cpp
new file mode 100644
index 0000000..ffe0981
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/misc-sve.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+namespace arm_gemm {
+
+namespace utils {
+
+#ifdef ARM_COMPUTE_ENABLE_SME
+namespace sme {
+
+unsigned long raw_vector_length() {
+    static unsigned long res=0;
+
+    if (!res) {
+        __asm __volatile(
+            ".inst 0xd503477f  // SMSTART ZA\n"
+            "cntb     %0\n"
+            ".inst 0xd503467f  // SMSTOP\n"
+            : "=r" (res)
+            :
+            : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15",
+              "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+        );
+    }
+
+    return res;
+}
+
+} // namespace sme
+#endif // ARM_COMPUTE_ENABLE_SME
+
+} // namespace utils
+
+} // namespace arm_gemm
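
Note: raw_vector_length() reports the streaming vector length in bytes by executing CNTB inside an SMSTART/SMSTOP pair. The element-count helper used throughout the kernels, sme::get_vector_length<T>(), is expected to be that byte count divided by sizeof(T); the sketch below states the relationship under that assumption (the real helper lives in utils.hpp and is not shown in this patch):

    #include <cstddef>

    // Sketch only: how an element-count helper can be layered on top of
    // raw_vector_length(). The name get_vector_length_sketch is illustrative.
    namespace arm_gemm {
    namespace utils {
    namespace sme {

    unsigned long raw_vector_length(); // defined above: CNTB under SMSTART/SMSTOP

    template <typename T>
    inline unsigned long get_vector_length_sketch()
    {
        // Streaming vector length in bytes divided by the element size gives
        // the number of T lanes in a Z vector while in streaming mode.
        return raw_vector_length() / sizeof(T);
    }

    } // namespace sme
    } // namespace utils
    } // namespace arm_gemm
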
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_sme.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_sme.hpp
new file mode 100644
index 0000000..054088e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_sme.hpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "interleave_indirect.hpp"
+#include "transform.hpp"
+
+namespace arm_gemm {
+
+/*
+ * Define "standard" transforms for the blocked GEMMs for SVE.
+ *
+ * This assumes that A is interleaved 'height' ways, B is interleaved
+ * 'width'xVL ways and transposed, and that the merge needs to work in
+ * 'height' x 'width'xVL blocks.
+ *
+ * The optional 'block' parameter is for kernels using dot-product type
+ * instructions like UDOT and SDOT.
+ */
+template<typename TOperand, typename TResult, unsigned int height_vectors, unsigned int width_vectors, unsigned int block=1, bool integrate_sums=false>
+class StdTransformsSME
+{
+public:
+    template<typename TIn>
+    void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
+                  const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
+        Interleave<height_vectors, block, VLType::SME>(out, in, stride, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
+    }
+
+    template<typename TIn>
+    void PrepareA_indirect(TOperand *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
+                           const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
+        IndirectInterleave<height_vectors, block, VLType::SME>(out, ptr, stringlen, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
+    }
+
+    template<typename TIn>
+    void PrepareA_convolution(TOperand *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
+                              const int y0, const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
+        ConvolutionInterleave<height_vectors, block, VLType::SME>(out, ptr, stride, conv, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
+    }
+
+    template<typename TIn>
+    void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
+                  const int xmax, const int k0, const int kmax) {
+        Transform<width_vectors, block, true, VLType::SME>(out, in, stride, x0, xmax, k0, kmax);
+    }
+
+    template<typename TOut>
+    void Merge(TOut *out, const TResult *in, int stride, int y0, int ymax, int x0, int xmax, const TOut *bias, const Activation act, bool accumulate) {
+        // Separate merge not supported for SME.
+    }
+};
+
+} // namespace arm_gemm
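
Note: as the comment above says, A is blocked 'height' vector-lengths of rows at a time and B is blocked 'width' x VL columns and transposed; the kernel headers earlier in this patch simply embed a StdTransformsSME instance. A hedged sketch of how a driver could invoke these hooks (names and buffer sizing are illustrative; the real dispatch lives elsewhere in arm_gemm):

    // Sketch: prepare the interleaved A panel and the transposed/interleaved B
    // panel for one GEMM, using a kernel class Kern that exposes the same
    // typedefs and 'transforms' member as the sme2_* headers in this patch.
    template <typename Kern>
    void prepare_panels_sketch(typename Kern::operand_type *a_panel,
                               typename Kern::operand_type *b_panel,
                               const typename Kern::operand_type *A, int lda,
                               const typename Kern::operand_type *B, int ldb,
                               int M, int N, int K, Kern &kern)
    {
        // Rows 0..M of A over the K-range 0..K, with no row-sum accumulation.
        kern.transforms.PrepareA(a_panel, A, lda, 0, M, 0, K, /*row_sum_multiplier=*/0);
        // Columns 0..N of B, transposed into 'width' x VL blocks.
        kern.transforms.PrepareB(b_panel, B, ldb, 0, N, 0, K);
    }
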
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/list-sve.hpp b/src/core/NEON/kernels/arm_gemm/transforms/list-sve.hpp
index 895177b..c066c01 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/list-sve.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/list-sve.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,6 +21,24 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#ifdef ARM_COMPUTE_ENABLE_SME
+#include "sme_transpose_interleave_16VL_1x4.hpp"
+#include "sme_transpose_interleave_16VL_2x2_fp32bf16.hpp"
+#include "sme_transpose_interleave_16VL_2x2.hpp"
+#include "sme_transpose_interleave_16VL.hpp"
+#include "sme_transpose_interleave_1VL_1x4.hpp"
+#include "sme_transpose_interleave_1VL_2x2_fp32bf16.hpp"
+#include "sme_transpose_interleave_1VL_2x2.hpp"
+#include "sme_transpose_interleave_1VL.hpp"
+#include "sme_transpose_interleave_2VL_1x4.hpp"
+#include "sme_transpose_interleave_2VL_2x2.hpp"
+#include "sme_transpose_interleave_2VL_2x2_fp32bf16.hpp"
+#include "sme_transpose_interleave_2VL.hpp"
+#include "sme_transpose_interleave_4VL_1x4.hpp"
+#include "sme_transpose_interleave_4VL_2x2.hpp"
+#include "sme_transpose_interleave_4VL_2x2_fp32bf16.hpp"
+#include "sme_transpose_interleave_4VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME
 #include "sve_transpose_interleave_12VL_2x4_fp32bf16.hpp"
 #include "sve_transpose_interleave_1VL_1x4.hpp"
 #include "sve_transpose_interleave_1VL.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
new file mode 100644
index 0000000..3475d30
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_16VL(uint32_t *out, const uint32_t *in, size_t width, size_t in_stride, size_t height)
+{
+    size_t out_stride = 16 * height * sme::get_vector_length<uint8_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p7.b\n"
+      "1:"  // Main row loop: Head
+      "mov x22, %x[in]\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x1\n"
+      "mov x20, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z31.s }, p0/Z, [x22]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z30.s }, p0/Z, [x22, #1, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z29.s }, p0/Z, [x22, #2, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z28.s }, p0/Z, [x22, #3, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z27.s }, p0/Z, [x22, #4, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z26.s }, p0/Z, [x22, #5, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z25.s }, p0/Z, [x22, #6, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z24.s }, p0/Z, [x22, #7, MUL VL]\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "decw x19\n"
+      "whilelt p6.s, XZR, x19\n"
+      "decw x19\n"
+      "whilelt p5.s, XZR, x19\n"
+      "decw x19\n"
+      "whilelt p4.s, XZR, x19\n"
+      "decw x19\n"
+      "whilelt p3.s, XZR, x19\n"
+      "decw x19\n"
+      "whilelt p2.s, XZR, x19\n"
+      "decw x19\n"
+      "whilelt p1.s, XZR, x19\n"
+      "decw x19\n"
+      "addvl x22, x22, #16\n"
+      "ld1w { z23.s }, p0/Z, [x22, #-8, MUL VL]\n"
+      "whilelt p0.s, XZR, x19\n"
+      "mov x19, x21\n"
+      "ld1w { z22.s }, p6/Z, [x22, #-7, MUL VL]\n"
+      "decw x20, ALL, MUL #16\n"
+      "ld1w { z21.s }, p5/Z, [x22, #-6, MUL VL]\n"
+      "cmp x20, #0x0\n"
+      "ld1w { z20.s }, p4/Z, [x22, #-5, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "ld1w { z19.s }, p3/Z, [x22, #-4, MUL VL]\n"
+      "ld1w { z18.s }, p2/Z, [x22, #-3, MUL VL]\n"
+      "ld1w { z17.s }, p1/Z, [x22, #-2, MUL VL]\n"
+      "ld1w { z16.s }, p0/Z, [x22, #-1, MUL VL]\n"
+      "st1w { z31.s }, p7, [x19]\n"
+      "st1w { z30.s }, p7, [x19, #1, MUL VL]\n"
+      "st1w { z29.s }, p7, [x19, #2, MUL VL]\n"
+      "st1w { z28.s }, p7, [x19, #3, MUL VL]\n"
+      "st1w { z27.s }, p7, [x19, #4, MUL VL]\n"
+      "st1w { z26.s }, p7, [x19, #5, MUL VL]\n"
+      "st1w { z25.s }, p7, [x19, #6, MUL VL]\n"
+      "st1w { z24.s }, p7, [x19, #7, MUL VL]\n"
+      "addvl x19, x19, #16\n"
+      "st1w { z23.s }, p7, [x19, #-8, MUL VL]\n"
+      "st1w { z22.s }, p7, [x19, #-7, MUL VL]\n"
+      "st1w { z21.s }, p7, [x19, #-6, MUL VL]\n"
+      "st1w { z20.s }, p7, [x19, #-5, MUL VL]\n"
+      "st1w { z19.s }, p7, [x19, #-4, MUL VL]\n"
+      "st1w { z18.s }, p7, [x19, #-3, MUL VL]\n"
+      "st1w { z17.s }, p7, [x19, #-2, MUL VL]\n"
+      "st1w { z16.s }, p7, [x19, #-1, MUL VL]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #16\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<16, 1, true, VLType::SME>(
+    float *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_16VL(
+        reinterpret_cast<uint32_t *>(out),
+        reinterpret_cast<const uint32_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(float) / 4,
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+#endif // __ARM_FEATURE_SVE
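
Note: for orientation, the routine above writes, for every group of 16 x VL columns, one contiguous 16-vector slice per input row, zero-padding the final partial group; consecutive groups are out_stride bytes apart. A scalar model of that output ordering, intended purely as documentation (widths and strides here are in 32-bit elements):

    #include <cstddef>
    #include <cstdint>

    // Scalar sketch of the layout produced by sme_transpose_interleave_16VL:
    // block b, row r occupies out[b*block*height + r*block .. + block-1].
    static void transpose_interleave_16VL_ref(uint32_t *out, const uint32_t *in,
                                              size_t width, size_t in_stride_words,
                                              size_t height, size_t vl_words)
    {
        const size_t block    = 16 * vl_words;                  // columns per output group
        const size_t n_blocks = (width + block - 1) / block;    // final partial group is zero padded
        for (size_t b = 0; b < n_blocks; b++) {
            for (size_t row = 0; row < height; row++) {
                uint32_t *o = out + b * block * height + row * block;
                for (size_t i = 0; i < block; i++) {
                    const size_t col = b * block + i;
                    o[i] = (col < width) ? in[row * in_stride_words + col] : 0u;
                }
            }
        }
    }
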
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
new file mode 100644
index 0000000..fc3c4ab
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_16VL_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));
+
+    if (height % 4) {
+        memset(pad_row, 0, width * sizeof(uint8_t));
+    }
+
+    size_t out_stride = 16 * roundup<size_t>(height, 4) * sme::get_vector_length<uint32_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p4.b\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "cmp %x[height], #0x3\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "csel x22, x22, %x[pad_row], GT\n"
+      "csel x23, x23, %x[pad_row], GE\n"
+      "cmp %x[height], #0x1\n"
+      "mov x21, %x[out]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "mov x20, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p3.b, XZR, x19\n"
+      "ld1b { z20.b }, p3/Z, [x25]\n"
+      "decb x19\n"
+      "whilelt p2.b, XZR, x19\n"
+      "ld1b { z18.b }, p2/Z, [x25, #1, MUL VL]\n"
+      "decb x19\n"
+      "whilelt p1.b, XZR, x19\n"
+      "ld1b { z17.b }, p3/Z, [x24]\n"
+      "decb x19\n"
+      "whilelt p0.b, XZR, x19\n"
+      "ld1b { z19.b }, p2/Z, [x24, #1, MUL VL]\n"
+      "ld1b { z16.b }, p3/Z, [x23]\n"
+      "zip1 z25.b, z20.b, z16.b\n"
+      "zip2 z24.b, z20.b, z16.b\n"
+      "mov x19, x21\n"
+      "ld1b { z16.b }, p2/Z, [x23, #1, MUL VL]\n"
+      "zip1 z22.b, z18.b, z16.b\n"
+      "zip2 z21.b, z18.b, z16.b\n"
+      "decw x20, ALL, MUL #16\n"
+      "ld1b { z16.b }, p3/Z, [x22]\n"
+      "zip1 z18.b, z17.b, z16.b\n"
+      "zip2 z17.b, z17.b, z16.b\n"
+      "cmp x20, #0x0\n"
+      "ld1b { z16.b }, p2/Z, [x22, #1, MUL VL]\n"
+      "zip1 z20.b, z19.b, z16.b\n"
+      "zip2 z16.b, z19.b, z16.b\n"
+      "add x21, x21, %x[out_stride]\n"
+      "ld1b { z19.b }, p1/Z, [x25, #2, MUL VL]\n"
+      "zip1 z23.b, z25.b, z18.b\n"
+      "zip2 z0.b, z25.b, z18.b\n"
+      "ld1b { z18.b }, p0/Z, [x25, #3, MUL VL]\n"
+      "zip1 z31.b, z24.b, z17.b\n"
+      "zip2 z30.b, z24.b, z17.b\n"
+      "addvl x25, x25, #4\n"
+      "ld1b { z17.b }, p1/Z, [x24, #2, MUL VL]\n"
+      "zip1 z29.b, z22.b, z20.b\n"
+      "zip2 z28.b, z22.b, z20.b\n"
+      "ld1b { z22.b }, p0/Z, [x24, #3, MUL VL]\n"
+      "zip1 z27.b, z21.b, z16.b\n"
+      "zip2 z26.b, z21.b, z16.b\n"
+      "addvl x24, x24, #4\n"
+      "ld1b { z16.b }, p1/Z, [x23, #2, MUL VL]\n"
+      "zip1 z21.b, z19.b, z16.b\n"
+      "zip2 z20.b, z19.b, z16.b\n"
+      "ld1b { z16.b }, p0/Z, [x23, #3, MUL VL]\n"
+      "zip1 z25.b, z18.b, z16.b\n"
+      "zip2 z24.b, z18.b, z16.b\n"
+      "addvl x23, x23, #4\n"
+      "ld1b { z16.b }, p1/Z, [x22, #2, MUL VL]\n"
+      "zip1 z19.b, z17.b, z16.b\n"
+      "zip2 z18.b, z17.b, z16.b\n"
+      "ld1b { z16.b }, p0/Z, [x22, #3, MUL VL]\n"
+      "zip1 z17.b, z22.b, z16.b\n"
+      "zip2 z16.b, z22.b, z16.b\n"
+      "addvl x22, x22, #4\n"
+      "st1b { z23.b }, p4, [x19]\n"
+      "zip1 z23.b, z21.b, z19.b\n"
+      "zip2 z22.b, z21.b, z19.b\n"
+      "st1b { z0.b }, p4, [x19, #1, MUL VL]\n"
+      "zip1 z21.b, z20.b, z18.b\n"
+      "zip2 z20.b, z20.b, z18.b\n"
+      "st1b { z31.b }, p4, [x19, #2, MUL VL]\n"
+      "zip1 z19.b, z25.b, z17.b\n"
+      "zip2 z18.b, z25.b, z17.b\n"
+      "st1b { z30.b }, p4, [x19, #3, MUL VL]\n"
+      "zip1 z17.b, z24.b, z16.b\n"
+      "zip2 z16.b, z24.b, z16.b\n"
+      "st1b { z29.b }, p4, [x19, #4, MUL VL]\n"
+      "st1b { z28.b }, p4, [x19, #5, MUL VL]\n"
+      "st1b { z27.b }, p4, [x19, #6, MUL VL]\n"
+      "st1b { z26.b }, p4, [x19, #7, MUL VL]\n"
+      "addvl x19, x19, #16\n"
+      "st1b { z23.b }, p4, [x19, #-8, MUL VL]\n"
+      "st1b { z22.b }, p4, [x19, #-7, MUL VL]\n"
+      "st1b { z21.b }, p4, [x19, #-6, MUL VL]\n"
+      "st1b { z20.b }, p4, [x19, #-5, MUL VL]\n"
+      "st1b { z19.b }, p4, [x19, #-4, MUL VL]\n"
+      "st1b { z18.b }, p4, [x19, #-3, MUL VL]\n"
+      "st1b { z17.b }, p4, [x19, #-2, MUL VL]\n"
+      "st1b { z16.b }, p4, [x19, #-1, MUL VL]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #16\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<16, 4, true, VLType::SME>(
+    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_16VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(uint8_t) / 1,
+        stride * sizeof(uint8_t),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<16, 4, true, VLType::SME>(
+    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_16VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(int8_t) / 1,
+        stride * sizeof(int8_t),
+        (kmax-k0)
+    );
+}
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp
new file mode 100644
index 0000000..af1649f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_16VL_2x2(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint16_t *pad_row = reinterpret_cast<uint16_t *>(alloca(width * sizeof(uint16_t)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(uint16_t));
+    }
+
+    size_t out_stride = 16 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p5.b\n"
+      "1:"  // Main row loop: Head
+      "mov x23, %x[in]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "csel x22, x22, %x[pad_row], GT\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "mov x20, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p2.h, XZR, x19\n"
+      "ld1h { z17.h }, p2/Z, [x23]\n"
+      "dech x19\n"
+      "whilelt p1.h, XZR, x19\n"
+      "ld1h { z19.h }, p1/Z, [x23, #1, MUL VL]\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z21.h }, p0/Z, [x23, #2, MUL VL]\n"
+      "dech x19\n"
+      "whilelt p4.h, XZR, x19\n"
+      "ld1h { z20.h }, p4/Z, [x23, #3, MUL VL]\n"
+      "dech x19\n"
+      "whilelt p3.h, XZR, x19\n"
+      "ld1h { z16.h }, p2/Z, [x22]\n"
+      "zip1 z0.h, z17.h, z16.h\n"
+      "dech x19\n"
+      "whilelt p2.h, XZR, x19\n"
+      "ld1h { z18.h }, p1/Z, [x22, #1, MUL VL]\n"
+      "zip2 z31.h, z17.h, z16.h\n"
+      "dech x19\n"
+      "whilelt p1.h, XZR, x19\n"
+      "ld1h { z17.h }, p0/Z, [x22, #2, MUL VL]\n"
+      "zip1 z30.h, z19.h, z18.h\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z16.h }, p4/Z, [x22, #3, MUL VL]\n"
+      "zip2 z29.h, z19.h, z18.h\n"
+      "ld1h { z19.h }, p3/Z, [x23, #4, MUL VL]\n"
+      "mov x19, x21\n"
+      "decw x20, ALL, MUL #16\n"
+      "zip1 z28.h, z21.h, z17.h\n"
+      "ld1h { z18.h }, p2/Z, [x23, #5, MUL VL]\n"
+      "zip2 z27.h, z21.h, z17.h\n"
+      "zip1 z26.h, z20.h, z16.h\n"
+      "cmp x20, #0x0\n"
+      "ld1h { z17.h }, p1/Z, [x23, #6, MUL VL]\n"
+      "zip2 z25.h, z20.h, z16.h\n"
+      "add x21, x21, %x[out_stride]\n"
+      "ld1h { z24.h }, p0/Z, [x23, #7, MUL VL]\n"
+      "addvl x23, x23, #8\n"
+      "ld1h { z16.h }, p3/Z, [x22, #4, MUL VL]\n"
+      "zip1 z23.h, z19.h, z16.h\n"
+      "zip2 z22.h, z19.h, z16.h\n"
+      "ld1h { z16.h }, p2/Z, [x22, #5, MUL VL]\n"
+      "zip1 z21.h, z18.h, z16.h\n"
+      "zip2 z20.h, z18.h, z16.h\n"
+      "ld1h { z16.h }, p1/Z, [x22, #6, MUL VL]\n"
+      "zip1 z19.h, z17.h, z16.h\n"
+      "zip2 z18.h, z17.h, z16.h\n"
+      "ld1h { z16.h }, p0/Z, [x22, #7, MUL VL]\n"
+      "st1h { z0.h }, p5, [x19]\n"
+      "addvl x22, x22, #8\n"
+      "zip1 z17.h, z24.h, z16.h\n"
+      "st1h { z31.h }, p5, [x19, #1, MUL VL]\n"
+      "zip2 z16.h, z24.h, z16.h\n"
+      "st1h { z30.h }, p5, [x19, #2, MUL VL]\n"
+      "st1h { z29.h }, p5, [x19, #3, MUL VL]\n"
+      "st1h { z28.h }, p5, [x19, #4, MUL VL]\n"
+      "st1h { z27.h }, p5, [x19, #5, MUL VL]\n"
+      "st1h { z26.h }, p5, [x19, #6, MUL VL]\n"
+      "st1h { z25.h }, p5, [x19, #7, MUL VL]\n"
+      "addvl x19, x19, #16\n"
+      "st1h { z23.h }, p5, [x19, #-8, MUL VL]\n"
+      "st1h { z22.h }, p5, [x19, #-7, MUL VL]\n"
+      "st1h { z21.h }, p5, [x19, #-6, MUL VL]\n"
+      "st1h { z20.h }, p5, [x19, #-5, MUL VL]\n"
+      "st1h { z19.h }, p5, [x19, #-4, MUL VL]\n"
+      "st1h { z18.h }, p5, [x19, #-3, MUL VL]\n"
+      "st1h { z17.h }, p5, [x19, #-2, MUL VL]\n"
+      "st1h { z16.h }, p5, [x19, #-1, MUL VL]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #16\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<16, 2, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_16VL_2x2(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
new file mode 100644
index 0000000..11c3bcc
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_16VL_2x2_fp32bf16(bfloat16 *out, const float *in, size_t width, size_t in_stride, size_t height)
+{
+    float *pad_row = reinterpret_cast<float *>(alloca(width * sizeof(float)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(float));
+    }
+
+    size_t out_stride = 16 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p7.b\n"
+      "1:"  // Main row loop: Head
+      "mov x24, %x[in]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "add %x[in], x23, %x[in_stride]\n"
+      "mov x22, %x[out]\n"
+      "csel x23, x23, %x[pad_row], GT\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "mov x21, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x20, x21\n"
+      "whilelt p1.s, XZR, x20\n"
+      "ld1w { z16.s }, p1/Z, [x24]\n"
+      ".inst 0x658abe00  // bfcvt z0.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "whilelt p0.s, XZR, x20\n"
+      "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
+      ".inst 0x658abe1f  // bfcvt z31.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "whilelt p6.s, XZR, x20\n"
+      "ld1w { z16.s }, p6/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x658abe1e  // bfcvt z30.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "whilelt p5.s, XZR, x20\n"
+      "ld1w { z16.s }, p5/Z, [x24, #3, MUL VL]\n"
+      ".inst 0x658abe1d  // bfcvt z29.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "whilelt p4.s, XZR, x20\n"
+      "ld1w { z16.s }, p4/Z, [x24, #4, MUL VL]\n"
+      ".inst 0x658abe1c  // bfcvt z28.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "whilelt p3.s, XZR, x20\n"
+      "ld1w { z16.s }, p3/Z, [x24, #5, MUL VL]\n"
+      ".inst 0x658abe1b  // bfcvt z27.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "whilelt p2.s, XZR, x20\n"
+      "ld1w { z16.s }, p2/Z, [x24, #6, MUL VL]\n"
+      ".inst 0x658abe1a  // bfcvt z26.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "ld1w { z16.s }, p1/Z, [x23]\n"
+      "whilelt p1.s, XZR, x20\n"
+      ".inst 0x648abe00  // bfcvtnt z0.h, p7/M, z16.s\n"
+      "decw x20\n"
+      "ld1w { z16.s }, p1/Z, [x24, #7, MUL VL]\n"
+      "addvl x24, x24, #16\n"
+      ".inst 0x658abe19  // bfcvt z25.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x23, #1, MUL VL]\n"
+      "whilelt p0.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe1f  // bfcvtnt z31.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x24, #-8, MUL VL]\n"
+      ".inst 0x658abe18  // bfcvt z24.h, p7/M, z16.s\n"
+      "mov x19, x22\n"
+      "decw x21, ALL, MUL #16\n"
+      "ld1w { z16.s }, p6/Z, [x23, #2, MUL VL]\n"
+      "whilelt p6.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe1e  // bfcvtnt z30.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p6/Z, [x24, #-7, MUL VL]\n"
+      ".inst 0x658abe17  // bfcvt z23.h, p7/M, z16.s\n"
+      "add x22, x22, %x[out_stride]\n"
+      "ld1w { z16.s }, p5/Z, [x23, #3, MUL VL]\n"
+      "whilelt p5.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe1d  // bfcvtnt z29.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p5/Z, [x24, #-6, MUL VL]\n"
+      ".inst 0x658abe16  // bfcvt z22.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p4/Z, [x23, #4, MUL VL]\n"
+      "whilelt p4.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe1c  // bfcvtnt z28.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p4/Z, [x24, #-5, MUL VL]\n"
+      ".inst 0x658abe15  // bfcvt z21.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p3/Z, [x23, #5, MUL VL]\n"
+      "whilelt p3.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe1b  // bfcvtnt z27.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p3/Z, [x24, #-4, MUL VL]\n"
+      ".inst 0x658abe14  // bfcvt z20.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x23, #6, MUL VL]\n"
+      "whilelt p2.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe1a  // bfcvtnt z26.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #-3, MUL VL]\n"
+      ".inst 0x658abe13  // bfcvt z19.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x23, #7, MUL VL]\n"
+      "whilelt p1.s, XZR, x20\n"
+      "decw x20\n"
+      ".inst 0x648abe19  // bfcvtnt z25.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #-2, MUL VL]\n"
+      "addvl x23, x23, #16\n"
+      ".inst 0x658abe12  // bfcvt z18.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x23, #-8, MUL VL]\n"
+      "whilelt p0.s, XZR, x20\n"
+      "cmp x21, #0x0\n"
+      ".inst 0x648abe18  // bfcvtnt z24.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x24, #-1, MUL VL]\n"
+      ".inst 0x658abe11  // bfcvt z17.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p6/Z, [x23, #-7, MUL VL]\n"
+      ".inst 0x648abe17  // bfcvtnt z23.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p5/Z, [x23, #-6, MUL VL]\n"
+      ".inst 0x648abe16  // bfcvtnt z22.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p4/Z, [x23, #-5, MUL VL]\n"
+      ".inst 0x648abe15  // bfcvtnt z21.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p3/Z, [x23, #-4, MUL VL]\n"
+      ".inst 0x648abe14  // bfcvtnt z20.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x23, #-3, MUL VL]\n"
+      ".inst 0x648abe13  // bfcvtnt z19.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x23, #-2, MUL VL]\n"
+      ".inst 0x648abe12  // bfcvtnt z18.h, p7/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x23, #-1, MUL VL]\n"
+      "st1h { z0.h }, p7, [x19]\n"
+      ".inst 0x648abe11  // bfcvtnt z17.h, p7/M, z16.s\n"
+      "st1h { z31.h }, p7, [x19, #1, MUL VL]\n"
+      "st1h { z30.h }, p7, [x19, #2, MUL VL]\n"
+      "st1h { z29.h }, p7, [x19, #3, MUL VL]\n"
+      "st1h { z28.h }, p7, [x19, #4, MUL VL]\n"
+      "st1h { z27.h }, p7, [x19, #5, MUL VL]\n"
+      "st1h { z26.h }, p7, [x19, #6, MUL VL]\n"
+      "st1h { z25.h }, p7, [x19, #7, MUL VL]\n"
+      "addvl x19, x19, #16\n"
+      "st1h { z24.h }, p7, [x19, #-8, MUL VL]\n"
+      "st1h { z23.h }, p7, [x19, #-7, MUL VL]\n"
+      "st1h { z22.h }, p7, [x19, #-6, MUL VL]\n"
+      "st1h { z21.h }, p7, [x19, #-5, MUL VL]\n"
+      "st1h { z20.h }, p7, [x19, #-4, MUL VL]\n"
+      "st1h { z19.h }, p7, [x19, #-3, MUL VL]\n"
+      "st1h { z18.h }, p7, [x19, #-2, MUL VL]\n"
+      "st1h { z17.h }, p7, [x19, #-1, MUL VL]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #16\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+template<>
+void Transform<16, 2, true, VLType::SME>(
+    bfloat16 *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_16VL_2x2_fp32bf16(
+        out,
+        in + k0 * stride + x0,
+        (xmax-x0),
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
new file mode 100644
index 0000000..3e0da36
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
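+// Plain transpose-interleave of 16-bit elements, one vector length per output
+// block. Rows are processed four at a time, with a single-row tail loop for
+// the remainder.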
+void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    size_t out_stride = 1 * height * sme::get_vector_length<uint8_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p1.b\n"
+      "blt 6f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "mov x22, %x[width]\n"
+      "cnth x20, ALL, MUL #4\n"
+      "add x19, x23, %x[in_stride]\n"
+      "cmp x22, x20\n"
+      "add %x[in], x19, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1h { z31.h }, p1/Z, [x25]\n"
+      "sub x22, x22, x20\n"
+      "cmp x22, x20\n"
+      "ld1h { z30.h }, p1/Z, [x25, #1, MUL VL]\n"
+      "ld1h { z29.h }, p1/Z, [x25, #2, MUL VL]\n"
+      "ld1h { z28.h }, p1/Z, [x25, #3, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      "ld1h { z27.h }, p1/Z, [x24]\n"
+      "ld1h { z26.h }, p1/Z, [x24, #1, MUL VL]\n"
+      "ld1h { z25.h }, p1/Z, [x24, #2, MUL VL]\n"
+      "ld1h { z24.h }, p1/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      "ld1h { z23.h }, p1/Z, [x23]\n"
+      "ld1h { z22.h }, p1/Z, [x23, #1, MUL VL]\n"
+      "ld1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
+      "ld1h { z20.h }, p1/Z, [x23, #3, MUL VL]\n"
+      "addvl x23, x23, #4\n"
+      "ld1h { z19.h }, p1/Z, [x19]\n"
+      "ld1h { z18.h }, p1/Z, [x19, #1, MUL VL]\n"
+      "ld1h { z17.h }, p1/Z, [x19, #2, MUL VL]\n"
+      "ld1h { z16.h }, p1/Z, [x19, #3, MUL VL]\n"
+      "st1h { z31.h }, p1, [x21]\n"
+      "addvl x19, x19, #4\n"
+      "st1h { z27.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z23.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z19.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z30.h }, p1, [x21]\n"
+      "st1h { z26.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z22.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z18.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z29.h }, p1, [x21]\n"
+      "st1h { z25.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z21.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z17.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z28.h }, p1, [x21]\n"
+      "st1h { z24.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z20.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x22, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "whilelt p0.h, XZR, x22\n"
+      "dech x22\n"
+      "ld1h { z19.h }, p0/Z, [x25]\n"
+      "cmp x22, #0x0\n"
+      "addvl x25, x25, #1\n"
+      "ld1h { z18.h }, p0/Z, [x24]\n"
+      "addvl x24, x24, #1\n"
+      "ld1h { z17.h }, p0/Z, [x23]\n"
+      "addvl x23, x23, #1\n"
+      "ld1h { z16.h }, p0/Z, [x19]\n"
+      "addvl x19, x19, #1\n"
+      "st1h { z19.h }, p1, [x21]\n"
+      "st1h { z18.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z17.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 1b\n"
+      "cbz %x[height], 12f\n"
+      "6:"  // Main loop skip
+      "7:"  // Tail row loop: Head
+      "mov x20, %x[width]\n"
+      "cnth x19, ALL, MUL #4\n"
+      "mov x25, %x[in]\n"
+      "cmp x20, x19\n"
+      "add %x[in], x25, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x1\n"
+      "blt 9f\n"
+      "8:"  // Tail row loop: Unroll column loop
+      "ld1h { z19.h }, p1/Z, [x25]\n"
+      "sub x20, x20, x19\n"
+      "cmp x20, x19\n"
+      "ld1h { z18.h }, p1/Z, [x25, #1, MUL VL]\n"
+      "ld1h { z17.h }, p1/Z, [x25, #2, MUL VL]\n"
+      "ld1h { z16.h }, p1/Z, [x25, #3, MUL VL]\n"
+      "st1h { z19.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "addvl x25, x25, #4\n"
+      "st1h { z18.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z16.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 8b\n"
+      "9:"  // Tail row loop: Unroll column loop skip
+      "cbz x20, 11f\n"
+      "10:"  // Tail row loop: Column loop
+      "whilelt p0.h, XZR, x20\n"
+      "dech x20\n"
+      "ld1h { z16.h }, p0/Z, [x25]\n"
+      "st1h { z16.h }, p1, [x21]\n"
+      "cmp x20, #0x0\n"
+      "addvl x25, x25, #1\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 10b\n"
+      "11:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #1\n"
+      "bge 7b\n"
+      "12:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<1, 1, true, VLType::SME>(
+    float *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(float) / 2,
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<1, 1, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<1, 1, true, VLType::SME>(
+    __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(__fp16) / 2,
+        stride * sizeof(__fp16),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
new file mode 100644
index 0000000..4c5d3d3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
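+// Transpose-interleave of 8-bit elements with 4-row interleaving (zip1/zip2),
+// one vector length per output block. Heights not divisible by 4 read the
+// zeroed pad row for the missing rows.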
+void sme_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));
+
+    if (height % 4) {
+        memset(pad_row, 0, width * sizeof(uint8_t));
+    }
+
+    size_t out_stride = 1 * roundup<size_t>(height, 4) * sme::get_vector_length<uint32_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "cmp %x[height], #0x3\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "csel x22, x22, %x[pad_row], GT\n"
+      "csel x23, x23, %x[pad_row], GE\n"
+      "cmp %x[height], #0x1\n"
+      "mov x21, %x[width]\n"
+      "cntb x20\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "cmp x21, x20\n"
+      "mov x19, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1b { z17.b }, p1/Z, [x25]\n"
+      "sub x21, x21, x20\n"
+      "cmp x21, x20\n"
+      "ld1b { z18.b }, p1/Z, [x24]\n"
+      "addvl x25, x25, #1\n"
+      "addvl x24, x24, #1\n"
+      "ld1b { z16.b }, p1/Z, [x23]\n"
+      "zip1 z20.b, z17.b, z16.b\n"
+      "zip2 z19.b, z17.b, z16.b\n"
+      "addvl x23, x23, #1\n"
+      "ld1b { z16.b }, p1/Z, [x22]\n"
+      "zip1 z17.b, z18.b, z16.b\n"
+      "zip2 z18.b, z18.b, z16.b\n"
+      "addvl x22, x22, #1\n"
+      "zip1 z16.b, z20.b, z17.b\n"
+      "st1b { z16.b }, p1, [x19]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "zip2 z16.b, z20.b, z17.b\n"
+      "st1b { z16.b }, p1, [x19]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "zip1 z17.b, z19.b, z18.b\n"
+      "zip2 z16.b, z19.b, z18.b\n"
+      "st1b { z17.b }, p1, [x19]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "st1b { z16.b }, p1, [x19]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x21, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "whilelt p0.b, XZR, x21\n"
+      "ld1b { z17.b }, p0/Z, [x25]\n"
+      "decw x21\n"
+      "ld1b { z18.b }, p0/Z, [x24]\n"
+      "cmp x21, #0x0\n"
+      "incd x25, ALL, MUL #2\n"
+      "ld1b { z16.b }, p0/Z, [x23]\n"
+      "zip1 z17.b, z17.b, z16.b\n"
+      "incd x24, ALL, MUL #2\n"
+      "incd x23, ALL, MUL #2\n"
+      "ld1b { z16.b }, p0/Z, [x22]\n"
+      "zip1 z16.b, z18.b, z16.b\n"
+      "incd x22, ALL, MUL #2\n"
+      "zip1 z16.b, z17.b, z16.b\n"
+      "st1b { z16.b }, p1, [x19]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #1\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<1, 4, true, VLType::SME>(
+    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(uint8_t) / 1,
+        stride * sizeof(uint8_t),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<1, 4, true, VLType::SME>(
+    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(int8_t) / 1,
+        stride * sizeof(int8_t),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
new file mode 100644
index 0000000..3fc4b06
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
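+// Transpose-interleave of 16-bit elements with 2-row interleaving (zip1/zip2),
+// one vector length per output block. An odd final row is paired with the
+// zeroed pad row.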
+void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint16_t *pad_row = reinterpret_cast<uint16_t *>(alloca(width * sizeof(uint16_t)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(uint16_t));
+    }
+
+    size_t out_stride = 1 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p1.b\n"
+      "blt 6f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "mov x22, %x[width]\n"
+      "cnth x20, ALL, MUL #2\n"
+      "add x19, x23, %x[in_stride]\n"
+      "cmp x22, x20\n"
+      "add %x[in], x19, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1h { z17.h }, p1/Z, [x25]\n"
+      "sub x22, x22, x20\n"
+      "cmp x22, x20\n"
+      "ld1h { z16.h }, p1/Z, [x24]\n"
+      "zip1 z24.h, z17.h, z16.h\n"
+      "zip2 z23.h, z17.h, z16.h\n"
+      "ld1h { z17.h }, p1/Z, [x23]\n"
+      "ld1h { z16.h }, p1/Z, [x19]\n"
+      "zip1 z22.h, z17.h, z16.h\n"
+      "zip2 z21.h, z17.h, z16.h\n"
+      "ld1h { z17.h }, p1/Z, [x25, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+      "zip1 z20.h, z17.h, z16.h\n"
+      "addvl x24, x24, #2\n"
+      "zip2 z19.h, z17.h, z16.h\n"
+      "ld1h { z18.h }, p1/Z, [x23, #1, MUL VL]\n"
+      "addvl x23, x23, #2\n"
+      "ld1h { z16.h }, p1/Z, [x19, #1, MUL VL]\n"
+      "st1h { z24.h }, p1, [x21]\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "addvl x19, x19, #2\n"
+      "st1h { z22.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "zip2 z16.h, z18.h, z16.h\n"
+      "st1h { z23.h }, p1, [x21]\n"
+      "st1h { z21.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z20.h }, p1, [x21]\n"
+      "st1h { z17.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z19.h }, p1, [x21]\n"
+      "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x22, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "whilelt p0.h, XZR, x22\n"
+      "ld1h { z17.h }, p0/Z, [x25]\n"
+      "decw x22\n"
+      "ld1h { z16.h }, p0/Z, [x24]\n"
+      "cmp x22, #0x0\n"
+      "incd x25, ALL, MUL #4\n"
+      "zip1 z18.h, z17.h, z16.h\n"
+      "ld1h { z17.h }, p0/Z, [x23]\n"
+      "incd x24, ALL, MUL #4\n"
+      "incd x23, ALL, MUL #4\n"
+      "ld1h { z16.h }, p0/Z, [x19]\n"
+      "incd x19, ALL, MUL #4\n"
+      "zip1 z16.h, z17.h, z16.h\n"
+      "st1h { z18.h }, p1, [x21]\n"
+      "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #2\n"
+      "bge 1b\n"
+      "cbz %x[height], 12f\n"
+      "6:"  // Main loop skip
+      "7:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "mov x20, %x[width]\n"
+      "cnth x19, ALL, MUL #2\n"
+      "add %x[in], x24, %x[in_stride]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "cmp x20, x19\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "blt 9f\n"
+      "8:"  // Tail row loop: Unroll column loop
+      "ld1h { z18.h }, p1/Z, [x25]\n"
+      "sub x20, x20, x19\n"
+      "cmp x20, x19\n"
+      "ld1h { z16.h }, p1/Z, [x24]\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "zip2 z19.h, z18.h, z16.h\n"
+      "ld1h { z18.h }, p1/Z, [x25, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "st1h { z19.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "addvl x24, x24, #2\n"
+      "zip2 z16.h, z18.h, z16.h\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z16.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 8b\n"
+      "9:"  // Tail row loop: Unroll column loop skip
+      "cbz x20, 11f\n"
+      "10:"  // Tail row loop: Column loop
+      "whilelt p0.h, XZR, x20\n"
+      "ld1h { z17.h }, p0/Z, [x25]\n"
+      "decw x20\n"
+      "ld1h { z16.h }, p0/Z, [x24]\n"
+      "cmp x20, #0x0\n"
+      "incd x25, ALL, MUL #4\n"
+      "zip1 z16.h, z17.h, z16.h\n"
+      "incd x24, ALL, MUL #4\n"
+      "st1h { z16.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 10b\n"
+      "11:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #1\n"
+      "bge 7b\n"
+      "12:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<1, 2, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL_2x2(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
new file mode 100644
index 0000000..d8fa686
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
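+// Transpose-interleave for fp32 input written out as bf16 with 2-row
+// interleaving (bfcvt/bfcvtnt), one vector length per output block. Odd
+// heights use the zeroed pad row.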
+void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, size_t width, size_t in_stride, size_t height)
+{
+    float *pad_row = reinterpret_cast<float *>(alloca(width * sizeof(float)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(float));
+    }
+
+    size_t out_stride = 1 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p1.b\n"
+      "blt 6f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "mov x22, %x[width]\n"
+      "cnth x20, ALL, MUL #2\n"
+      "add x19, x23, %x[in_stride]\n"
+      "cmp x22, x20\n"
+      "add %x[in], x19, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1w { z16.s }, p1/Z, [x25]\n"
+      ".inst 0x658aa618  // bfcvt z24.h, p1/M, z16.s\n"
+      "sub x22, x22, x20\n"
+      "cmp x22, x20\n"
+      "ld1w { z16.s }, p1/Z, [x23]\n"
+      ".inst 0x658aa617  // bfcvt z23.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658aa616  // bfcvt z22.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x23, #1, MUL VL]\n"
+      ".inst 0x658aa615  // bfcvt z21.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+      ".inst 0x658aa614  // bfcvt z20.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x23, #2, MUL VL]\n"
+      ".inst 0x658aa613  // bfcvt z19.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
+      ".inst 0x658aa612  // bfcvt z18.h, p1/M, z16.s\n"
+      "addvl x25, x25, #4\n"
+      "ld1w { z16.s }, p1/Z, [x23, #3, MUL VL]\n"
+      ".inst 0x658aa611  // bfcvt z17.h, p1/M, z16.s\n"
+      "addvl x23, x23, #4\n"
+      "ld1w { z16.s }, p1/Z, [x24]\n"
+      ".inst 0x648aa618  // bfcvtnt z24.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x19]\n"
+      ".inst 0x648aa617  // bfcvtnt z23.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
+      ".inst 0x648aa616  // bfcvtnt z22.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+      ".inst 0x648aa615  // bfcvtnt z21.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x648aa614  // bfcvtnt z20.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x19, #2, MUL VL]\n"
+      ".inst 0x648aa613  // bfcvtnt z19.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0x648aa612  // bfcvtnt z18.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x19, #3, MUL VL]\n"
+      "st1h { z24.h }, p1, [x21]\n"
+      "addvl x19, x19, #4\n"
+      ".inst 0x648aa611  // bfcvtnt z17.h, p1/M, z16.s\n"
+      "st1h { z23.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z22.h }, p1, [x21]\n"
+      "st1h { z21.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z20.h }, p1, [x21]\n"
+      "st1h { z19.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z18.h }, p1, [x21]\n"
+      "st1h { z17.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x22, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "whilelt p0.s, XZR, x22\n"
+      "ld1w { z16.s }, p0/Z, [x25]\n"
+      ".inst 0x658aa612  // bfcvt z18.h, p1/M, z16.s\n"
+      "decw x22\n"
+      "ld1w { z16.s }, p0/Z, [x23]\n"
+      ".inst 0x658aa611  // bfcvt z17.h, p1/M, z16.s\n"
+      "cmp x22, #0x0\n"
+      "addvl x25, x25, #1\n"
+      "ld1w { z16.s }, p0/Z, [x24]\n"
+      "addvl x24, x24, #1\n"
+      "addvl x23, x23, #1\n"
+      ".inst 0x648aa612  // bfcvtnt z18.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x19]\n"
+      "addvl x19, x19, #1\n"
+      ".inst 0x648aa611  // bfcvtnt z17.h, p1/M, z16.s\n"
+      "st1h { z18.h }, p1, [x21]\n"
+      "st1h { z17.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #2\n"
+      "bge 1b\n"
+      "cbz %x[height], 12f\n"
+      "6:"  // Main loop skip
+      "7:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "mov x20, %x[width]\n"
+      "cnth x19, ALL, MUL #2\n"
+      "add %x[in], x24, %x[in_stride]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "cmp x20, x19\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "blt 9f\n"
+      "8:"  // Tail row loop: Unroll column loop
+      "ld1w { z16.s }, p1/Z, [x25]\n"
+      ".inst 0x658aa614  // bfcvt z20.h, p1/M, z16.s\n"
+      "sub x20, x20, x19\n"
+      "cmp x20, x19\n"
+      "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658aa613  // bfcvt z19.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+      ".inst 0x658aa612  // bfcvt z18.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
+      ".inst 0x658aa611  // bfcvt z17.h, p1/M, z16.s\n"
+      "addvl x25, x25, #4\n"
+      "ld1w { z16.s }, p1/Z, [x24]\n"
+      ".inst 0x648aa614  // bfcvtnt z20.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
+      ".inst 0x648aa613  // bfcvtnt z19.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x648aa612  // bfcvtnt z18.h, p1/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
+      "st1h { z20.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "addvl x24, x24, #4\n"
+      "st1h { z19.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      ".inst 0x648aa611  // bfcvtnt z17.h, p1/M, z16.s\n"
+      "st1h { z18.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 8b\n"
+      "9:"  // Tail row loop: Unroll column loop skip
+      "cbz x20, 11f\n"
+      "10:"  // Tail row loop: Column loop
+      "whilelt p0.s, XZR, x20\n"
+      "ld1w { z16.s }, p0/Z, [x25]\n"
+      ".inst 0x658aa611  // bfcvt z17.h, p1/M, z16.s\n"
+      "decw x20\n"
+      "ld1w { z16.s }, p0/Z, [x24]\n"
+      "cmp x20, #0x0\n"
+      "addvl x25, x25, #1\n"
+      ".inst 0x648aa611  // bfcvtnt z17.h, p1/M, z16.s\n"
+      "addvl x24, x24, #1\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 10b\n"
+      "11:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #1\n"
+      "bge 7b\n"
+      "12:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+template<>
+void Transform<1, 2, true, VLType::SME>(
+    bfloat16 *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_1VL_2x2_fp32bf16(
+        out,
+        in + k0 * stride + x0,
+        (xmax-x0),
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
new file mode 100644
index 0000000..3341159
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
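+// Plain transpose-interleave of 16-bit elements, two vector lengths per output
+// block. Rows are processed four at a time, with a single-row tail loop for
+// the remainder.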
+void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    size_t out_stride = 2 * height * sme::get_vector_length<uint8_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p2.b\n"
+      "blt 6f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "mov x22, %x[width]\n"
+      "cnth x19, ALL, MUL #4\n"
+      "add x20, x23, %x[in_stride]\n"
+      "cmp x22, x19\n"
+      "add %x[in], x20, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "sub x22, x22, x19\n"
+      "ld1h { z31.h }, p2/Z, [x25]\n"
+      "cmp x22, x19\n"
+      "ld1h { z30.h }, p2/Z, [x25, #1, MUL VL]\n"
+      "ld1h { z29.h }, p2/Z, [x25, #2, MUL VL]\n"
+      "ld1h { z28.h }, p2/Z, [x25, #3, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      "ld1h { z27.h }, p2/Z, [x24]\n"
+      "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+      "ld1h { z25.h }, p2/Z, [x24, #2, MUL VL]\n"
+      "ld1h { z24.h }, p2/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      "ld1h { z23.h }, p2/Z, [x23]\n"
+      "ld1h { z22.h }, p2/Z, [x23, #1, MUL VL]\n"
+      "ld1h { z21.h }, p2/Z, [x23, #2, MUL VL]\n"
+      "ld1h { z20.h }, p2/Z, [x23, #3, MUL VL]\n"
+      "addvl x23, x23, #4\n"
+      "ld1h { z19.h }, p2/Z, [x20]\n"
+      "ld1h { z18.h }, p2/Z, [x20, #1, MUL VL]\n"
+      "ld1h { z17.h }, p2/Z, [x20, #2, MUL VL]\n"
+      "ld1h { z16.h }, p2/Z, [x20, #3, MUL VL]\n"
+      "st1h { z31.h }, p2, [x21]\n"
+      "addvl x20, x20, #4\n"
+      "st1h { z30.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z27.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z26.h }, p2, [x21, #3, MUL VL]\n"
+      "st1h { z23.h }, p2, [x21, #4, MUL VL]\n"
+      "st1h { z22.h }, p2, [x21, #5, MUL VL]\n"
+      "st1h { z19.h }, p2, [x21, #6, MUL VL]\n"
+      "st1h { z18.h }, p2, [x21, #7, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z29.h }, p2, [x21]\n"
+      "st1h { z28.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z25.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z24.h }, p2, [x21, #3, MUL VL]\n"
+      "st1h { z21.h }, p2, [x21, #4, MUL VL]\n"
+      "st1h { z20.h }, p2, [x21, #5, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21, #6, MUL VL]\n"
+      "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x22, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "mov x19, x22\n"
+      "whilelt p1.h, XZR, x19\n"
+      "ld1h { z23.h }, p1/Z, [x25]\n"
+      "dech x19\n"
+      "dech x22, ALL, MUL #2\n"
+      "ld1h { z22.h }, p1/Z, [x24]\n"
+      "whilelt p0.h, XZR, x19\n"
+      "cmp x22, #0x0\n"
+      "ld1h { z21.h }, p0/Z, [x25, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
+      "addvl x24, x24, #2\n"
+      "ld1h { z19.h }, p1/Z, [x23]\n"
+      "ld1h { z18.h }, p0/Z, [x23, #1, MUL VL]\n"
+      "addvl x23, x23, #2\n"
+      "ld1h { z17.h }, p1/Z, [x20]\n"
+      "ld1h { z16.h }, p0/Z, [x20, #1, MUL VL]\n"
+      "addvl x20, x20, #2\n"
+      "st1h { z23.h }, p2, [x21]\n"
+      "st1h { z21.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z20.h }, p2, [x21, #3, MUL VL]\n"
+      "st1h { z19.h }, p2, [x21, #4, MUL VL]\n"
+      "st1h { z18.h }, p2, [x21, #5, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21, #6, MUL VL]\n"
+      "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #8\n"
+      "bge 1b\n"
+      "cbz %x[height], 12f\n"
+      "6:"  // Main loop skip
+      "7:"  // Tail row loop: Head
+      "mov x20, %x[width]\n"
+      "cnth x19, ALL, MUL #4\n"
+      "mov x25, %x[in]\n"
+      "cmp x20, x19\n"
+      "add %x[in], x25, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x1\n"
+      "blt 9f\n"
+      "8:"  // Tail row loop: Unroll column loop
+      "sub x20, x20, x19\n"
+      "ld1h { z19.h }, p2/Z, [x25]\n"
+      "cmp x20, x19\n"
+      "ld1h { z18.h }, p2/Z, [x25, #1, MUL VL]\n"
+      "ld1h { z17.h }, p2/Z, [x25, #2, MUL VL]\n"
+      "ld1h { z16.h }, p2/Z, [x25, #3, MUL VL]\n"
+      "st1h { z19.h }, p2, [x21]\n"
+      "addvl x25, x25, #4\n"
+      "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z17.h }, p2, [x21]\n"
+      "st1h { z16.h }, p2, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 8b\n"
+      "9:"  // Tail row loop: Unroll column loop skip
+      "cbz x20, 11f\n"
+      "10:"  // Tail row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z17.h }, p0/Z, [x25]\n"
+      "dech x19\n"
+      "dech x20, ALL, MUL #2\n"
+      "whilelt p0.h, XZR, x19\n"
+      "cmp x20, #0x0\n"
+      "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21]\n"
+      "addvl x25, x25, #2\n"
+      "st1h { z16.h }, p2, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 10b\n"
+      "11:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #2\n"
+      "bge 7b\n"
+      "12:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<2, 1, true, VLType::SME>(
+    float *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(float) / 2,
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<2, 1, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<2, 1, true, VLType::SME>(
+    __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(__fp16) / 2,
+        stride * sizeof(__fp16),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
new file mode 100644
index 0000000..39c4999
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
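+// Transpose-interleave of 8-bit elements with 4-row interleaving (zip1/zip2),
+// two vector lengths per output block. Heights not divisible by 4 read the
+// zeroed pad row for the missing rows.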
+void sme_transpose_interleave_2VL_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));
+
+    if (height % 4) {
+        memset(pad_row, 0, width * sizeof(uint8_t));
+    }
+
+    size_t out_stride = 2 * roundup<size_t>(height, 4) * sme::get_vector_length<uint32_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "cmp %x[height], #0x3\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "csel x22, x22, %x[pad_row], GT\n"
+      "csel x23, x23, %x[pad_row], GE\n"
+      "cmp %x[height], #0x1\n"
+      "mov x21, %x[width]\n"
+      "cntb x20\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "cmp x21, x20\n"
+      "mov x19, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1b { z17.b }, p1/Z, [x25]\n"
+      "sub x21, x21, x20\n"
+      "cmp x21, x20\n"
+      "ld1b { z18.b }, p1/Z, [x24]\n"
+      "addvl x25, x25, #1\n"
+      "addvl x24, x24, #1\n"
+      "ld1b { z16.b }, p1/Z, [x23]\n"
+      "zip1 z20.b, z17.b, z16.b\n"
+      "zip2 z19.b, z17.b, z16.b\n"
+      "addvl x23, x23, #1\n"
+      "ld1b { z17.b }, p1/Z, [x22]\n"
+      "zip1 z16.b, z18.b, z17.b\n"
+      "zip2 z18.b, z18.b, z17.b\n"
+      "addvl x22, x22, #1\n"
+      "zip1 z17.b, z20.b, z16.b\n"
+      "zip2 z16.b, z20.b, z16.b\n"
+      "st1b { z17.b }, p1, [x19]\n"
+      "st1b { z16.b }, p1, [x19, #1, MUL VL]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "zip1 z17.b, z19.b, z18.b\n"
+      "zip2 z16.b, z19.b, z18.b\n"
+      "st1b { z17.b }, p1, [x19]\n"
+      "st1b { z16.b }, p1, [x19, #1, MUL VL]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x21, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "whilelt p0.b, XZR, x21\n"
+      "ld1b { z18.b }, p0/Z, [x25]\n"
+      "decw x21, ALL, MUL #2\n"
+      "ld1b { z17.b }, p0/Z, [x24]\n"
+      "cmp x21, #0x0\n"
+      "incd x25, ALL, MUL #4\n"
+      "ld1b { z16.b }, p0/Z, [x23]\n"
+      "zip1 z18.b, z18.b, z16.b\n"
+      "incd x24, ALL, MUL #4\n"
+      "incd x23, ALL, MUL #4\n"
+      "ld1b { z16.b }, p0/Z, [x22]\n"
+      "zip1 z16.b, z17.b, z16.b\n"
+      "incd x22, ALL, MUL #4\n"
+      "zip1 z17.b, z18.b, z16.b\n"
+      "zip2 z16.b, z18.b, z16.b\n"
+      "st1b { z17.b }, p1, [x19]\n"
+      "st1b { z16.b }, p1, [x19, #1, MUL VL]\n"
+      "add x19, x19, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #2\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<2, 4, true, VLType::SME>(
+    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(uint8_t) / 1,
+        stride * sizeof(uint8_t),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<2, 4, true, VLType::SME>(
+    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(int8_t) / 1,
+        stride * sizeof(int8_t),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
new file mode 100644
index 0000000..cfa8682
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
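+// Transpose-interleave of 16-bit elements with 2-row interleaving (zip1/zip2),
+// two vector lengths per output block. An odd final row is paired with the
+// zeroed pad row.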
+void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint16_t *pad_row = reinterpret_cast<uint16_t *>(alloca(width * sizeof(uint16_t)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(uint16_t));
+    }
+
+    size_t out_stride = 2 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p1.b\n"
+      "blt 6f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "mov x22, %x[width]\n"
+      "cnth x20, ALL, MUL #2\n"
+      "add x19, x23, %x[in_stride]\n"
+      "cmp x22, x20\n"
+      "add %x[in], x19, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1h { z17.h }, p1/Z, [x25]\n"
+      "sub x22, x22, x20\n"
+      "cmp x22, x20\n"
+      "ld1h { z16.h }, p1/Z, [x24]\n"
+      "zip1 z24.h, z17.h, z16.h\n"
+      "zip2 z23.h, z17.h, z16.h\n"
+      "ld1h { z17.h }, p1/Z, [x23]\n"
+      "ld1h { z16.h }, p1/Z, [x19]\n"
+      "zip1 z22.h, z17.h, z16.h\n"
+      "zip2 z21.h, z17.h, z16.h\n"
+      "ld1h { z17.h }, p1/Z, [x25, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+      "addvl x24, x24, #2\n"
+      "zip1 z20.h, z17.h, z16.h\n"
+      "zip2 z19.h, z17.h, z16.h\n"
+      "ld1h { z18.h }, p1/Z, [x23, #1, MUL VL]\n"
+      "addvl x23, x23, #2\n"
+      "ld1h { z16.h }, p1/Z, [x19, #1, MUL VL]\n"
+      "st1h { z24.h }, p1, [x21]\n"
+      "addvl x19, x19, #2\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "st1h { z23.h }, p1, [x21, #1, MUL VL]\n"
+      "zip2 z16.h, z18.h, z16.h\n"
+      "st1h { z22.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z21.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z20.h }, p1, [x21]\n"
+      "st1h { z19.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z17.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x22, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "whilelt p0.h, XZR, x22\n"
+      "ld1h { z17.h }, p0/Z, [x25]\n"
+      "decw x22, ALL, MUL #2\n"
+      "ld1h { z16.h }, p0/Z, [x24]\n"
+      "cmp x22, #0x0\n"
+      "addvl x25, x25, #1\n"
+      "zip1 z20.h, z17.h, z16.h\n"
+      "ld1h { z19.h }, p0/Z, [x23]\n"
+      "addvl x24, x24, #1\n"
+      "addvl x23, x23, #1\n"
+      "zip2 z18.h, z17.h, z16.h\n"
+      "ld1h { z16.h }, p0/Z, [x19]\n"
+      "addvl x19, x19, #1\n"
+      "zip1 z17.h, z19.h, z16.h\n"
+      "zip2 z16.h, z19.h, z16.h\n"
+      "st1h { z20.h }, p1, [x21]\n"
+      "st1h { z18.h }, p1, [x21, #1, MUL VL]\n"
+      "st1h { z17.h }, p1, [x21, #2, MUL VL]\n"
+      "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 1b\n"
+      "cbz %x[height], 12f\n"
+      "6:"  // Main loop skip
+      "7:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "mov x20, %x[width]\n"
+      "cnth x19, ALL, MUL #2\n"
+      "add %x[in], x24, %x[in_stride]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "cmp x20, x19\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "blt 9f\n"
+      "8:"  // Tail row loop: Unroll column loop
+      "ld1h { z18.h }, p1/Z, [x25]\n"
+      "sub x20, x20, x19\n"
+      "cmp x20, x19\n"
+      "ld1h { z16.h }, p1/Z, [x24]\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "zip2 z19.h, z18.h, z16.h\n"
+      "ld1h { z18.h }, p1/Z, [x25, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "addvl x24, x24, #2\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "st1h { z19.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "zip2 z16.h, z18.h, z16.h\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 8b\n"
+      "9:"  // Tail row loop: Unroll column loop skip
+      "cbz x20, 11f\n"
+      "10:"  // Tail row loop: Column loop
+      "whilelt p0.h, XZR, x20\n"
+      "ld1h { z18.h }, p0/Z, [x25]\n"
+      "decw x20, ALL, MUL #2\n"
+      "ld1h { z16.h }, p0/Z, [x24]\n"
+      "cmp x20, #0x0\n"
+      "addvl x25, x25, #1\n"
+      "zip1 z17.h, z18.h, z16.h\n"
+      "addvl x24, x24, #1\n"
+      "zip2 z16.h, z18.h, z16.h\n"
+      "st1h { z17.h }, p1, [x21]\n"
+      "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 10b\n"
+      "11:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #2\n"
+      "bge 7b\n"
+      "12:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<2, 2, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL_2x2(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
new file mode 100644
index 0000000..65936d9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_2VL_2x2_fp32bf16(bfloat16 *out, const float *in, size_t width, size_t in_stride, size_t height)
+{
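+    // Scratch row substituted for the missing second input row when the height is odd.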
+    float *pad_row = reinterpret_cast<float *>(alloca(width * sizeof(float)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(float));
+    }
+
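+    // Byte stride between successive column blocks of the interleaved output
+    // (two vectors of halfwords per zipped pair of rows).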
+    size_t out_stride = 2 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p2.b\n"
+      "blt 6f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "mov x22, %x[width]\n"
+      "cnth x19, ALL, MUL #2\n"
+      "add x20, x23, %x[in_stride]\n"
+      "cmp x22, x19\n"
+      "add %x[in], x20, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "blt 3f\n"
+      "2:"  // Main row loop: Unroll column loop
+      "ld1w { z16.s }, p2/Z, [x25]\n"
+      ".inst 0x658aaa18  // bfcvt z24.h, p2/M, z16.s\n"
+      "sub x22, x22, x19\n"
+      "cmp x22, x19\n"
+      "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658aaa17  // bfcvt z23.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x23]\n"
+      ".inst 0x658aaa16  // bfcvt z22.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
+      ".inst 0x658aaa15  // bfcvt z21.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
+      ".inst 0x658aaa14  // bfcvt z20.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
+      ".inst 0x658aaa13  // bfcvt z19.h, p2/M, z16.s\n"
+      "addvl x25, x25, #4\n"
+      "ld1w { z16.s }, p2/Z, [x23, #2, MUL VL]\n"
+      ".inst 0x658aaa12  // bfcvt z18.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x23, #3, MUL VL]\n"
+      ".inst 0x658aaa11  // bfcvt z17.h, p2/M, z16.s\n"
+      "addvl x23, x23, #4\n"
+      "ld1w { z16.s }, p2/Z, [x24]\n"
+      ".inst 0x648aaa18  // bfcvtnt z24.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+      ".inst 0x648aaa17  // bfcvtnt z23.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x20]\n"
+      ".inst 0x648aaa16  // bfcvtnt z22.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x20, #1, MUL VL]\n"
+      ".inst 0x648aaa15  // bfcvtnt z21.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x648aaa14  // bfcvtnt z20.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0x648aaa13  // bfcvtnt z19.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x20, #2, MUL VL]\n"
+      ".inst 0x648aaa12  // bfcvtnt z18.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x20, #3, MUL VL]\n"
+      "st1h { z24.h }, p2, [x21]\n"
+      "addvl x20, x20, #4\n"
+      ".inst 0x648aaa11  // bfcvtnt z17.h, p2/M, z16.s\n"
+      "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z21.h }, p2, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z20.h }, p2, [x21]\n"
+      "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z18.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 2b\n"
+      "3:"  // Main row loop: Unroll column loop skip
+      "cbz x22, 5f\n"
+      "4:"  // Main row loop: Column loop
+      "mov x19, x22\n"
+      "whilelt p1.s, XZR, x19\n"
+      "ld1w { z16.s }, p1/Z, [x25]\n"
+      ".inst 0x658aaa14  // bfcvt z20.h, p2/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658aaa13  // bfcvt z19.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x23]\n"
+      ".inst 0x658aaa12  // bfcvt z18.h, p2/M, z16.s\n"
+      "decw x22, ALL, MUL #2\n"
+      "cmp x22, #0x0\n"
+      "ld1w { z16.s }, p0/Z, [x23, #1, MUL VL]\n"
+      ".inst 0x658aaa11  // bfcvt z17.h, p2/M, z16.s\n"
+      "addvl x25, x25, #2\n"
+      "addvl x23, x23, #2\n"
+      "ld1w { z16.s }, p1/Z, [x24]\n"
+      ".inst 0x648aaa14  // bfcvtnt z20.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
+      "addvl x24, x24, #2\n"
+      ".inst 0x648aaa13  // bfcvtnt z19.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x20]\n"
+      ".inst 0x648aaa12  // bfcvtnt z18.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x20, #1, MUL VL]\n"
+      "addvl x20, x20, #2\n"
+      ".inst 0x648aaa11  // bfcvtnt z17.h, p2/M, z16.s\n"
+      "st1h { z20.h }, p2, [x21]\n"
+      "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z18.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 4b\n"
+      "5:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 1b\n"
+      "cbz %x[height], 12f\n"
+      "6:"  // Main loop skip
+      "7:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "mov x20, %x[width]\n"
+      "cnth x19, ALL, MUL #2\n"
+      "add %x[in], x24, %x[in_stride]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "cmp x20, x19\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "blt 9f\n"
+      "8:"  // Tail row loop: Unroll column loop
+      "ld1w { z16.s }, p2/Z, [x25]\n"
+      ".inst 0x658aaa14  // bfcvt z20.h, p2/M, z16.s\n"
+      "sub x20, x20, x19\n"
+      "cmp x20, x19\n"
+      "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658aaa13  // bfcvt z19.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
+      ".inst 0x658aaa12  // bfcvt z18.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
+      ".inst 0x658aaa11  // bfcvt z17.h, p2/M, z16.s\n"
+      "addvl x25, x25, #4\n"
+      "ld1w { z16.s }, p2/Z, [x24]\n"
+      ".inst 0x648aaa14  // bfcvtnt z20.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+      ".inst 0x648aaa13  // bfcvtnt z19.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x648aaa12  // bfcvtnt z18.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
+      "st1h { z20.h }, p2, [x21]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0x648aaa11  // bfcvtnt z17.h, p2/M, z16.s\n"
+      "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "st1h { z18.h }, p2, [x21]\n"
+      "st1h { z17.h }, p2, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bge 8b\n"
+      "9:"  // Tail row loop: Unroll column loop skip
+      "cbz x20, 11f\n"
+      "10:"  // Tail row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p1.s, XZR, x19\n"
+      "ld1w { z16.s }, p1/Z, [x25]\n"
+      ".inst 0x658aaa12  // bfcvt z18.h, p2/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658aaa11  // bfcvt z17.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24]\n"
+      "decw x20, ALL, MUL #2\n"
+      "cmp x20, #0x0\n"
+      ".inst 0x648aaa12  // bfcvtnt z18.h, p2/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "addvl x24, x24, #2\n"
+      ".inst 0x648aaa11  // bfcvtnt z17.h, p2/M, z16.s\n"
+      "st1h { z18.h }, p2, [x21]\n"
+      "st1h { z17.h }, p2, [x21, #1, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 10b\n"
+      "11:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #2\n"
+      "bge 7b\n"
+      "12:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<2, 2, true, VLType::SME>(
+    bfloat16 *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_2VL_2x2_fp32bf16(
+        out,
+        in + k0 * stride + x0,
+        (xmax-x0),
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
new file mode 100644
index 0000000..7b783c3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    size_t out_stride = 4 * height * sme::get_vector_length<uint8_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p4.b\n"
+      "blt 4f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "mov x20, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p3.h, XZR, x19\n"
+      "ld1h { z31.h }, p3/Z, [x25]\n"
+      "dech x19\n"
+      "whilelt p2.h, XZR, x19\n"
+      "ld1h { z30.h }, p2/Z, [x25, #1, MUL VL]\n"
+      "dech x19\n"
+      "whilelt p1.h, XZR, x19\n"
+      "ld1h { z29.h }, p1/Z, [x25, #2, MUL VL]\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z28.h }, p0/Z, [x25, #3, MUL VL]\n"
+      "mov x19, x21\n"
+      "dech x20, ALL, MUL #4\n"
+      "ld1h { z27.h }, p3/Z, [x24]\n"
+      "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+      "cmp x20, #0x0\n"
+      "addvl x25, x25, #4\n"
+      "ld1h { z25.h }, p1/Z, [x24, #2, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "ld1h { z24.h }, p0/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      "ld1h { z23.h }, p3/Z, [x23]\n"
+      "ld1h { z22.h }, p2/Z, [x23, #1, MUL VL]\n"
+      "ld1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
+      "ld1h { z20.h }, p0/Z, [x23, #3, MUL VL]\n"
+      "addvl x23, x23, #4\n"
+      "ld1h { z19.h }, p3/Z, [x22]\n"
+      "ld1h { z18.h }, p2/Z, [x22, #1, MUL VL]\n"
+      "ld1h { z17.h }, p1/Z, [x22, #2, MUL VL]\n"
+      "ld1h { z16.h }, p0/Z, [x22, #3, MUL VL]\n"
+      "st1h { z31.h }, p4, [x19]\n"
+      "addvl x22, x22, #4\n"
+      "st1h { z30.h }, p4, [x19, #1, MUL VL]\n"
+      "st1h { z29.h }, p4, [x19, #2, MUL VL]\n"
+      "st1h { z28.h }, p4, [x19, #3, MUL VL]\n"
+      "st1h { z27.h }, p4, [x19, #4, MUL VL]\n"
+      "st1h { z26.h }, p4, [x19, #5, MUL VL]\n"
+      "st1h { z25.h }, p4, [x19, #6, MUL VL]\n"
+      "st1h { z24.h }, p4, [x19, #7, MUL VL]\n"
+      "addvl x19, x19, #16\n"
+      "st1h { z23.h }, p4, [x19, #-8, MUL VL]\n"
+      "st1h { z22.h }, p4, [x19, #-7, MUL VL]\n"
+      "st1h { z21.h }, p4, [x19, #-6, MUL VL]\n"
+      "st1h { z20.h }, p4, [x19, #-5, MUL VL]\n"
+      "st1h { z19.h }, p4, [x19, #-4, MUL VL]\n"
+      "st1h { z18.h }, p4, [x19, #-3, MUL VL]\n"
+      "st1h { z17.h }, p4, [x19, #-2, MUL VL]\n"
+      "st1h { z16.h }, p4, [x19, #-1, MUL VL]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #16\n"
+      "bge 1b\n"
+      "cbz %x[height], 8f\n"
+      "4:"  // Main loop skip
+      "5:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add %x[in], x25, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x1\n"
+      "mov x20, %x[width]\n"
+      "6:"  // Tail row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z19.h }, p0/Z, [x25]\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z18.h }, p0/Z, [x25, #1, MUL VL]\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z17.h }, p0/Z, [x25, #2, MUL VL]\n"
+      "dech x19\n"
+      "dech x20, ALL, MUL #4\n"
+      "whilelt p0.h, XZR, x19\n"
+      "cmp x20, #0x0\n"
+      "ld1h { z16.h }, p0/Z, [x25, #3, MUL VL]\n"
+      "st1h { z19.h }, p4, [x21]\n"
+      "addvl x25, x25, #4\n"
+      "st1h { z18.h }, p4, [x21, #1, MUL VL]\n"
+      "st1h { z17.h }, p4, [x21, #2, MUL VL]\n"
+      "st1h { z16.h }, p4, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 6b\n"
+      "7:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 5b\n"
+      "8:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<4, 1, true, VLType::SME>(
+    float *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
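+    // The generic 4VL copy operates on 16-bit elements, so the width is passed in halfwords.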
+    sme_transpose_interleave_4VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(float) / 2,
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<4, 1, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_4VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<4, 1, true, VLType::SME>(
+    __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_4VL(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(__fp16) / 2,
+        stride * sizeof(__fp16),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
new file mode 100644
index 0000000..0429bb0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));
+
+    if (height % 4) {
+        memset(pad_row, 0, width * sizeof(uint8_t));
+    }
+
+    size_t out_stride = 4 * roundup<size_t>(height, 4) * sme::get_vector_length<uint32_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "ptrue p1.b\n"
+      "1:"  // Main row loop: Head
+      "mov x24, %x[in]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "add x21, x22, %x[in_stride]\n"
+      "cmp %x[height], #0x3\n"
+      "add %x[in], x21, %x[in_stride]\n"
+      "csel x21, x21, %x[pad_row], GT\n"
+      "csel x22, x22, %x[pad_row], GE\n"
+      "cmp %x[height], #0x1\n"
+      "mov x20, %x[out]\n"
+      "csel x23, x23, %x[pad_row], GT\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "mov x19, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "whilelt p0.b, XZR, x19\n"
+      "ld1b { z17.b }, p0/Z, [x24]\n"
+      "decw x19, ALL, MUL #4\n"
+      "ld1b { z19.b }, p0/Z, [x23]\n"
+      "cmp x19, #0x0\n"
+      "addvl x24, x24, #1\n"
+      "ld1b { z16.b }, p0/Z, [x22]\n"
+      "zip1 z18.b, z17.b, z16.b\n"
+      "zip2 z20.b, z17.b, z16.b\n"
+      "addvl x23, x23, #1\n"
+      "ld1b { z16.b }, p0/Z, [x21]\n"
+      "zip1 z17.b, z19.b, z16.b\n"
+      "zip2 z19.b, z19.b, z16.b\n"
+      "addvl x22, x22, #1\n"
+      "addvl x21, x21, #1\n"
+      "zip1 z16.b, z18.b, z17.b\n"
+      "zip2 z18.b, z18.b, z17.b\n"
+      "st1b { z16.b }, p1, [x20]\n"
+      "zip1 z17.b, z20.b, z19.b\n"
+      "zip2 z16.b, z20.b, z19.b\n"
+      "st1b { z18.b }, p1, [x20, #1, MUL VL]\n"
+      "st1b { z17.b }, p1, [x20, #2, MUL VL]\n"
+      "st1b { z16.b }, p1, [x20, #3, MUL VL]\n"
+      "add x20, x20, %x[out_stride]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 1b\n"
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<4, 4, true, VLType::SME>(
+    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_4VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(uint8_t) / 1,
+        stride * sizeof(uint8_t),
+        (kmax-k0)
+    );
+}
+
+template<>
+void Transform<4, 4, true, VLType::SME>(
+    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_4VL_1x4(
+        reinterpret_cast<uint8_t *>(out),
+        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(int8_t) / 1,
+        stride * sizeof(int8_t),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
new file mode 100644
index 0000000..d1534db
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
+{
+    uint16_t *pad_row = reinterpret_cast<uint16_t *>(alloca(width * sizeof(uint16_t)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(uint16_t));
+    }
+
+    size_t out_stride = 4 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p2.b\n"
+      "blt 4f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "mov x20, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p1.h, XZR, x19\n"
+      "ld1h { z19.h }, p1/Z, [x25]\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z18.h }, p0/Z, [x25, #1, MUL VL]\n"
+      "ld1h { z17.h }, p1/Z, [x24]\n"
+      "decw x20, ALL, MUL #4\n"
+      "cmp x20, #0x0\n"
+      "zip1 z24.h, z19.h, z17.h\n"
+      "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "addvl x24, x24, #2\n"
+      "zip2 z23.h, z19.h, z17.h\n"
+      "ld1h { z17.h }, p1/Z, [x23]\n"
+      "zip1 z22.h, z18.h, z16.h\n"
+      "zip2 z21.h, z18.h, z16.h\n"
+      "ld1h { z20.h }, p0/Z, [x23, #1, MUL VL]\n"
+      "addvl x23, x23, #2\n"
+      "ld1h { z16.h }, p1/Z, [x22]\n"
+      "zip1 z19.h, z17.h, z16.h\n"
+      "zip2 z18.h, z17.h, z16.h\n"
+      "ld1h { z16.h }, p0/Z, [x22, #1, MUL VL]\n"
+      "addvl x22, x22, #2\n"
+      "zip1 z17.h, z20.h, z16.h\n"
+      "zip2 z16.h, z20.h, z16.h\n"
+      "st1h { z24.h }, p2, [x21]\n"
+      "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z21.h }, p2, [x21, #3, MUL VL]\n"
+      "st1h { z19.h }, p2, [x21, #4, MUL VL]\n"
+      "st1h { z18.h }, p2, [x21, #5, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21, #6, MUL VL]\n"
+      "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #8\n"
+      "bge 1b\n"
+      "cbz %x[height], 8f\n"
+      "4:"  // Main loop skip
+      "5:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "add %x[in], x24, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "mov x20, %x[width]\n"
+      "6:"  // Tail row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p1.h, XZR, x19\n"
+      "ld1h { z18.h }, p1/Z, [x25]\n"
+      "dech x19\n"
+      "whilelt p0.h, XZR, x19\n"
+      "ld1h { z20.h }, p0/Z, [x25, #1, MUL VL]\n"
+      "ld1h { z17.h }, p1/Z, [x24]\n"
+      "decw x20, ALL, MUL #4\n"
+      "cmp x20, #0x0\n"
+      "zip1 z19.h, z18.h, z17.h\n"
+      "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
+      "addvl x25, x25, #2\n"
+      "addvl x24, x24, #2\n"
+      "zip2 z18.h, z18.h, z17.h\n"
+      "zip1 z17.h, z20.h, z16.h\n"
+      "zip2 z16.h, z20.h, z16.h\n"
+      "st1h { z19.h }, p2, [x21]\n"
+      "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
+      "st1h { z17.h }, p2, [x21, #2, MUL VL]\n"
+      "st1h { z16.h }, p2, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 6b\n"
+      "7:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 5b\n"
+      "8:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<4, 2, true, VLType::SME>(
+    bfloat16 *out, const bfloat16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_4VL_2x2(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+        (xmax-x0) * sizeof(bfloat16) / 2,
+        stride * sizeof(bfloat16),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
new file mode 100644
index 0000000..59ac4bf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+namespace {
+
+void sme_transpose_interleave_4VL_2x2_fp32bf16(bfloat16 *out, const float *in, size_t width, size_t in_stride, size_t height)
+{
+    float *pad_row = reinterpret_cast<float *>(alloca(width * sizeof(float)));
+
+    if (height % 2) {
+        memset(pad_row, 0, width * sizeof(float));
+    }
+
+    size_t out_stride = 4 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
+
+    __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "cmp %x[height], #0x4\n"
+      "ptrue p4.b\n"
+      "blt 4f\n"
+      "1:"  // Main row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "add x23, x24, %x[in_stride]\n"
+      "add x22, x23, %x[in_stride]\n"
+      "add %x[in], x22, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "sub %x[height], %x[height], #0x4\n"
+      "mov x20, %x[width]\n"
+      "2:"  // Main row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p3.s, XZR, x19\n"
+      "ld1w { z16.s }, p3/Z, [x25]\n"
+      ".inst 0x658ab218  // bfcvt z24.h, p4/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p2.s, XZR, x19\n"
+      "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658ab217  // bfcvt z23.h, p4/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p1.s, XZR, x19\n"
+      "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+      ".inst 0x658ab216  // bfcvt z22.h, p4/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
+      ".inst 0x658ab215  // bfcvt z21.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p3/Z, [x23]\n"
+      ".inst 0x658ab214  // bfcvt z20.h, p4/M, z16.s\n"
+      "decw x20, ALL, MUL #4\n"
+      "cmp x20, #0x0\n"
+      "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
+      ".inst 0x658ab213  // bfcvt z19.h, p4/M, z16.s\n"
+      "addvl x25, x25, #4\n"
+      "ld1w { z16.s }, p1/Z, [x23, #2, MUL VL]\n"
+      ".inst 0x658ab212  // bfcvt z18.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
+      ".inst 0x658ab211  // bfcvt z17.h, p4/M, z16.s\n"
+      "addvl x23, x23, #4\n"
+      "ld1w { z16.s }, p3/Z, [x24]\n"
+      ".inst 0x648ab218  // bfcvtnt z24.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+      ".inst 0x648ab217  // bfcvtnt z23.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x648ab216  // bfcvtnt z22.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0x648ab215  // bfcvtnt z21.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p3/Z, [x22]\n"
+      ".inst 0x648ab214  // bfcvtnt z20.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x22, #1, MUL VL]\n"
+      ".inst 0x648ab213  // bfcvtnt z19.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x22, #2, MUL VL]\n"
+      ".inst 0x648ab212  // bfcvtnt z18.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x22, #3, MUL VL]\n"
+      "addvl x22, x22, #4\n"
+      ".inst 0x648ab211  // bfcvtnt z17.h, p4/M, z16.s\n"
+      "st1h { z24.h }, p4, [x21]\n"
+      "st1h { z23.h }, p4, [x21, #1, MUL VL]\n"
+      "st1h { z22.h }, p4, [x21, #2, MUL VL]\n"
+      "st1h { z21.h }, p4, [x21, #3, MUL VL]\n"
+      "st1h { z20.h }, p4, [x21, #4, MUL VL]\n"
+      "st1h { z19.h }, p4, [x21, #5, MUL VL]\n"
+      "st1h { z18.h }, p4, [x21, #6, MUL VL]\n"
+      "st1h { z17.h }, p4, [x21, #7, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 2b\n"
+      "3:"  // Main row loop: Column loop skip
+      "cmp %x[height], #0x4\n"
+      "addvl %x[out], %x[out], #8\n"
+      "bge 1b\n"
+      "cbz %x[height], 8f\n"
+      "4:"  // Main loop skip
+      "5:"  // Tail row loop: Head
+      "mov x25, %x[in]\n"
+      "add x24, x25, %x[in_stride]\n"
+      "cmp %x[height], #0x1\n"
+      "add %x[in], x24, %x[in_stride]\n"
+      "mov x21, %x[out]\n"
+      "csel x24, x24, %x[pad_row], GT\n"
+      "sub %x[height], %x[height], #0x2\n"
+      "mov x20, %x[width]\n"
+      "6:"  // Tail row loop: Column loop
+      "mov x19, x20\n"
+      "whilelt p3.s, XZR, x19\n"
+      "ld1w { z16.s }, p3/Z, [x25]\n"
+      ".inst 0x658ab214  // bfcvt z20.h, p4/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p2.s, XZR, x19\n"
+      "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+      ".inst 0x658ab213  // bfcvt z19.h, p4/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p1.s, XZR, x19\n"
+      "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+      ".inst 0x658ab212  // bfcvt z18.h, p4/M, z16.s\n"
+      "decw x19\n"
+      "whilelt p0.s, XZR, x19\n"
+      "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
+      ".inst 0x658ab211  // bfcvt z17.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p3/Z, [x24]\n"
+      "decw x20, ALL, MUL #4\n"
+      "cmp x20, #0x0\n"
+      ".inst 0x648ab214  // bfcvtnt z20.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+      "addvl x25, x25, #4\n"
+      ".inst 0x648ab213  // bfcvtnt z19.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
+      ".inst 0x648ab212  // bfcvtnt z18.h, p4/M, z16.s\n"
+      "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
+      "addvl x24, x24, #4\n"
+      ".inst 0x648ab211  // bfcvtnt z17.h, p4/M, z16.s\n"
+      "st1h { z20.h }, p4, [x21]\n"
+      "st1h { z19.h }, p4, [x21, #1, MUL VL]\n"
+      "st1h { z18.h }, p4, [x21, #2, MUL VL]\n"
+      "st1h { z17.h }, p4, [x21, #3, MUL VL]\n"
+      "add x21, x21, %x[out_stride]\n"
+      "bgt 6b\n"
+      "7:"  // Tail row loop: Column loop skip
+      "cmp %x[height], #0x1\n"
+      "addvl %x[out], %x[out], #4\n"
+      "bge 5b\n"
+      "8:"  // Done
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
+      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    );
+}
+
+} // anonymous namespace
+
+template<>
+void Transform<4, 2, true, VLType::SME>(
+    bfloat16 *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+    sme_transpose_interleave_4VL_2x2_fp32bf16(
+        out,
+        in + k0 * stride + x0,
+        (xmax-x0),
+        stride * sizeof(float),
+        (kmax-k0)
+    );
+}
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/utils.hpp b/src/core/NEON/kernels/arm_gemm/utils.hpp
index d7b5398..a28ddad 100644
--- a/src/core/NEON/kernels/arm_gemm/utils.hpp
+++ b/src/core/NEON/kernels/arm_gemm/utils.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -80,6 +80,7 @@
 enum class VLType {
     None,
     SVE,
+    SME
 };
 
 template<typename T>
@@ -191,6 +192,20 @@
 #endif // defined(__aarch64__)
 }
 
+#ifdef ARM_COMPUTE_ENABLE_SME
+namespace sme {
+
+// Function defined in misc-sve.cpp.
+extern unsigned int raw_vector_length();
+
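+// Number of elements of type T that fit in one SME (streaming-mode) vector,
+// assuming raw_vector_length() reports the vector length in bytes.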
+template <typename T>
+inline unsigned long get_vector_length() {
+    return raw_vector_length() / sizeof(T);
+}
+
+} // namespace sme
+#endif // ARM_COMPUTE_ENABLE_SME
+
 // get_vector_length(VLType): Returns vector length for type "T".
 //
 // This has the same requirements and constraints as the SVE-only form above, so we call into that code for SVE.
@@ -198,6 +213,10 @@
 template <typename T>
 inline unsigned long get_vector_length(VLType vl_type) {
   switch (vl_type) {
+#ifdef ARM_COMPUTE_ENABLE_SME
+    case VLType::SME:
+      return sme::get_vector_length<T>();
+#endif // ARM_COMPUTE_ENABLE_SME
     case VLType::SVE:
       return get_vector_length<T>();
     default:
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
new file mode 100644
index 0000000..e2397cd
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace input_transform {
+
+void sme_fp32_mla_6x6(
+  const unsigned int num_channels,
+  const float *input,
+  const size_t input_row_stride,
+  const size_t input_col_stride,
+  float *output,
+  const size_t output_col_stride
+)
+{
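+  // These appear to be the coefficient magnitudes of the F(4x4, 3x3) Winograd
+  // input-transform matrix B^T; they are broadcast so that individual lanes can
+  // be selected as multipliers below.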
+  const float B_values[4] = { 1.0f, 2.0f, 4.0f, 5.0f };
+  long long_channels = num_channels;
+
+  // Generated by armasmgen (February 4th, 2021)
+  __asm__ __volatile__(
+      ".inst 0xd503477f  // SMSTART ZA\n"
+      "fmov z16.s, #4.0\n"
+      "ptrue p1.b\n"
+      "ld1rqw { z2.s }, p1/Z, [%x[B_values]]\n"
+      "add x16, %x[input_row_0], %x[input_row_stride], LSL #2\n"
+      "add x15, %x[output_row_0], %x[output_row_stride], LSL #2\n"
+      "add x14, %x[input_row_0], %x[input_row_stride], LSL #3\n"
+      "add x13, %x[output_row_0], %x[output_row_stride], LSL #3\n"
+      "add x12, x14, %x[input_row_stride], LSL #2\n"
+      "add x11, x13, %x[output_row_stride], LSL #2\n"
+      "add x10, %x[input_row_0], %x[input_row_stride], LSL #4\n"
+      "add x9, %x[output_row_0], %x[output_row_stride], LSL #4\n"
+      "add x28, x10, %x[input_row_stride], LSL #2\n"
+      "add x27, x9, %x[output_row_stride], LSL #2\n"
+      "lsl x26, %x[input_col_1_stride], #0x1\n"
+      "lsl x25, %x[output_col_1_stride], #0x1\n"
+      "add x24, x26, %x[input_col_1_stride]\n"
+      "add x23, x25, %x[output_col_1_stride]\n"
+      "lsl x22, %x[input_col_1_stride], #0x2\n"
+      "lsl x21, %x[output_col_1_stride], #0x2\n"
+      "add x20, x22, %x[input_col_1_stride]\n"
+      "add x19, x21, %x[output_col_1_stride]\n"
+      "whilelt p0.s, XZR, %x[num_channels]\n"
+      "beq 2f\n"
+      "1:"  // channel_loop
+      "ld1w { z31.s }, p0/Z, [%x[input_row_0]]\n"
+      "decw %x[num_channels]\n"
+      "ld1w { z28.s }, p0/Z, [%x[input_row_0], %x[input_col_1_stride], LSL #2]\n"
+      "fmul z13.s, z28.s, z2.s[1]\n"
+      "ld1w { z27.s }, p0/Z, [%x[input_row_0], x26, LSL #2]\n"
+      "ld1w { z11.s }, p0/Z, [%x[input_row_0], x24, LSL #2]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "ld1w { z7.s }, p0/Z, [%x[input_row_0], x22, LSL #2]\n"
+      "fsub z15.s, z7.s, z27.s\n"
+      "fmad z31.s, p1/M, z16.s, z7.s\n"
+      "ld1w { z3.s }, p0/Z, [%x[input_row_0], x20, LSL #2]\n"
+      "fmla z13.s, z11.s, z2.s[1]\n"
+      "ld1w { z12.s }, p0/Z, [x14]\n"
+      "incb %x[input_row_0]\n"
+      "fmls z31.s, z27.s, z2.s[3]\n"
+      "ld1w { z14.s }, p0/Z, [x14, %x[input_col_1_stride], LSL #2]\n"
+      "fsub z25.s, z15.s, z13.s\n"
+      "fadd z8.s, z13.s, z15.s\n"
+      "ld1w { z24.s }, p0/Z, [x14, x26, LSL #2]\n"
+      "fmsb z27.s, p1/M, z16.s, z7.s\n"
+      "ld1w { z22.s }, p0/Z, [x14, x24, LSL #2]\n"
+      "fmul z7.s, z28.s, z2.s[2]\n"
+      "ld1w { z1.s }, p0/Z, [x14, x22, LSL #2]\n"
+      "fsub z15.s, z1.s, z24.s\n"
+      "fneg z7.s, p1/M, z7.s\n"
+      "ld1w { z20.s }, p0/Z, [x14, x20, LSL #2]\n"
+      "fadd z7.s, z7.s, z11.s\n"
+      "ld1w { z29.s }, p0/Z, [x10]\n"
+      "incb x14\n"
+      "fmad z28.s, p1/M, z16.s, z3.s\n"
+      "ld1w { z10.s }, p0/Z, [x10, %x[input_col_1_stride], LSL #2]\n"
+      "fmad z12.s, p1/M, z16.s, z1.s\n"
+      "ld1w { z18.s }, p0/Z, [x10, x26, LSL #2]\n"
+      "fmul z13.s, z14.s, z2.s[1]\n"
+      "ld1w { z19.s }, p0/Z, [x10, x24, LSL #2]\n"
+      "fadd z17.s, z7.s, z27.s\n"
+      "ld1w { z9.s }, p0/Z, [x10, x22, LSL #2]\n"
+      "fsub z27.s, z27.s, z7.s\n"
+      "fmls z28.s, z11.s, z2.s[3]\n"
+      "ld1w { z21.s }, p0/Z, [x10, x20, LSL #2]\n"
+      "incb x10\n"
+      "fmls z12.s, z24.s, z2.s[3]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fmla z13.s, z22.s, z2.s[1]\n"
+      "fsub z30.s, z15.s, z13.s\n"
+      "fadd z4.s, z13.s, z15.s\n"
+      "fmsb z24.s, p1/M, z16.s, z1.s\n"
+      "fsub z15.s, z9.s, z18.s\n"
+      "fmul z1.s, z14.s, z2.s[2]\n"
+      "fmad z14.s, p1/M, z16.s, z20.s\n"
+      "fmad z29.s, p1/M, z16.s, z9.s\n"
+      "fmul z13.s, z10.s, z2.s[1]\n"
+      "fneg z1.s, p1/M, z1.s\n"
+      "fadd z1.s, z1.s, z22.s\n"
+      "fmls z14.s, z22.s, z2.s[3]\n"
+      "fmls z29.s, z18.s, z2.s[3]\n"
+      "fadd z5.s, z1.s, z24.s\n"
+      "fsub z24.s, z24.s, z1.s\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fmla z13.s, z19.s, z2.s[1]\n"
+      "fsub z23.s, z15.s, z13.s\n"
+      "fadd z11.s, z13.s, z15.s\n"
+      "fmsb z18.s, p1/M, z16.s, z9.s\n"
+      "fmul z9.s, z10.s, z2.s[2]\n"
+      "fmad z10.s, p1/M, z16.s, z21.s\n"
+      "fmad z31.s, p1/M, z16.s, z29.s\n"
+      "fmad z8.s, p1/M, z16.s, z11.s\n"
+      "fneg z9.s, p1/M, z9.s\n"
+      "fadd z9.s, z9.s, z19.s\n"
+      "fmls z10.s, z19.s, z2.s[3]\n"
+      "fmls z31.s, z12.s, z2.s[3]\n"
+      "st1w { z31.s }, p0, [%x[output_row_0]]\n"
+      "fadd z26.s, z9.s, z18.s\n"
+      "fsub z18.s, z18.s, z9.s\n"
+      "fmls z8.s, z4.s, z2.s[3]\n"
+      "fmad z25.s, p1/M, z16.s, z23.s\n"
+      "fmad z28.s, p1/M, z16.s, z10.s\n"
+      "fmad z17.s, p1/M, z16.s, z26.s\n"
+      "fmad z27.s, p1/M, z16.s, z18.s\n"
+      "fmls z25.s, z30.s, z2.s[3]\n"
+      "fmls z28.s, z14.s, z2.s[3]\n"
+      "fmls z17.s, z5.s, z2.s[3]\n"
+      "st1w { z17.s }, p0, [%x[output_row_0], %x[output_col_1_stride], LSL #2]\n"
+      "fmls z27.s, z24.s, z2.s[3]\n"
+      "st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n"
+      "st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n"
+      "st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n"
+      "st1w { z28.s }, p0, [%x[output_row_0], x19, LSL #2]\n"
+      "incb %x[output_row_0]\n"
+      "ld1w { z19.s }, p0/Z, [x16]\n"
+      "ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n"
+      "fmul z13.s, z7.s, z2.s[1]\n"
+      "ld1w { z6.s }, p0/Z, [x16, x26, LSL #2]\n"
+      "ld1w { z27.s }, p0/Z, [x16, x24, LSL #2]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "ld1w { z25.s }, p0/Z, [x16, x22, LSL #2]\n"
+      "fsub z15.s, z25.s, z6.s\n"
+      "fmad z19.s, p1/M, z16.s, z25.s\n"
+      "ld1w { z20.s }, p0/Z, [x16, x20, LSL #2]\n"
+      "fmla z13.s, z27.s, z2.s[1]\n"
+      "ld1w { z0.s }, p0/Z, [x12]\n"
+      "incb x16\n"
+      "fmls z19.s, z6.s, z2.s[3]\n"
+      "ld1w { z31.s }, p0/Z, [x12, %x[input_col_1_stride], LSL #2]\n"
+      "fsub z8.s, z15.s, z13.s\n"
+      "fadd z28.s, z13.s, z15.s\n"
+      "ld1w { z1.s }, p0/Z, [x12, x26, LSL #2]\n"
+      "fmsb z6.s, p1/M, z16.s, z25.s\n"
+      "ld1w { z21.s }, p0/Z, [x12, x24, LSL #2]\n"
+      "fmul z25.s, z7.s, z2.s[2]\n"
+      "ld1w { z22.s }, p0/Z, [x12, x22, LSL #2]\n"
+      "fsub z15.s, z22.s, z1.s\n"
+      "fneg z25.s, p1/M, z25.s\n"
+      "ld1w { z17.s }, p0/Z, [x12, x20, LSL #2]\n"
+      "fadd z25.s, z25.s, z27.s\n"
+      "incb x12\n"
+      "fmad z7.s, p1/M, z16.s, z20.s\n"
+      "fmad z0.s, p1/M, z16.s, z22.s\n"
+      "fmul z13.s, z31.s, z2.s[1]\n"
+      "fadd z3.s, z25.s, z6.s\n"
+      "fsub z6.s, z6.s, z25.s\n"
+      "fmls z7.s, z27.s, z2.s[3]\n"
+      "fmls z0.s, z1.s, z2.s[3]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fmla z13.s, z21.s, z2.s[1]\n"
+      "fsub z9.s, z15.s, z13.s\n"
+      "fadd z27.s, z13.s, z15.s\n"
+      "fmsb z1.s, p1/M, z16.s, z22.s\n"
+      "fsub z15.s, z29.s, z12.s\n"
+      "fmul z22.s, z31.s, z2.s[2]\n"
+      "fmad z31.s, p1/M, z16.s, z17.s\n"
+      "fmul z13.s, z19.s, z2.s[1]\n"
+      "fmsb z12.s, p1/M, z16.s, z29.s\n"
+      "fneg z22.s, p1/M, z22.s\n"
+      "fadd z22.s, z22.s, z21.s\n"
+      "fmls z31.s, z21.s, z2.s[3]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fadd z25.s, z22.s, z1.s\n"
+      "fsub z1.s, z1.s, z22.s\n"
+      "fmla z13.s, z0.s, z2.s[1]\n"
+      "fmul z29.s, z19.s, z2.s[2]\n"
+      "fadd z22.s, z13.s, z15.s\n"
+      "st1w { z22.s }, p0, [x11]\n"
+      "fneg z29.s, p1/M, z29.s\n"
+      "fsub z22.s, z15.s, z13.s\n"
+      "fadd z29.s, z29.s, z0.s\n"
+      "st1w { z22.s }, p0, [x9]\n"
+      "fadd z22.s, z29.s, z12.s\n"
+      "fsub z15.s, z26.s, z5.s\n"
+      "fmul z13.s, z3.s, z2.s[1]\n"
+      "fsub z12.s, z12.s, z29.s\n"
+      "fmsb z5.s, p1/M, z16.s, z26.s\n"
+      "fmul z26.s, z3.s, z2.s[2]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fmla z13.s, z25.s, z2.s[1]\n"
+      "fneg z26.s, p1/M, z26.s\n"
+      "fadd z26.s, z26.s, z25.s\n"
+      "fadd z21.s, z13.s, z15.s\n"
+      "st1w { z21.s }, p0, [x11, %x[output_col_1_stride], LSL #2]\n"
+      "fsub z21.s, z15.s, z13.s\n"
+      "fmul z13.s, z6.s, z2.s[1]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "st1w { z21.s }, p0, [x9, %x[output_col_1_stride], LSL #2]\n"
+      "fadd z21.s, z26.s, z5.s\n"
+      "fsub z15.s, z18.s, z24.s\n"
+      "fmla z13.s, z1.s, z2.s[1]\n"
+      "fsub z5.s, z5.s, z26.s\n"
+      "fmsb z24.s, p1/M, z16.s, z18.s\n"
+      "fmul z18.s, z6.s, z2.s[2]\n"
+      "fadd z20.s, z13.s, z15.s\n"
+      "st1w { z20.s }, p0, [x11, x25, LSL #2]\n"
+      "fneg z18.s, p1/M, z18.s\n"
+      "fsub z20.s, z15.s, z13.s\n"
+      "fadd z18.s, z18.s, z1.s\n"
+      "st1w { z20.s }, p0, [x9, x25, LSL #2]\n"
+      "fadd z20.s, z18.s, z24.s\n"
+      "fsub z15.s, z11.s, z4.s\n"
+      "fmul z13.s, z28.s, z2.s[1]\n"
+      "fsub z24.s, z24.s, z18.s\n"
+      "fmsb z4.s, p1/M, z16.s, z11.s\n"
+      "fmul z11.s, z28.s, z2.s[2]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fmla z13.s, z27.s, z2.s[1]\n"
+      "fneg z11.s, p1/M, z11.s\n"
+      "fadd z11.s, z11.s, z27.s\n"
+      "fadd z26.s, z13.s, z15.s\n"
+      "st1w { z26.s }, p0, [x11, x23, LSL #2]\n"
+      "fsub z26.s, z15.s, z13.s\n"
+      "fmul z13.s, z8.s, z2.s[1]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "st1w { z26.s }, p0, [x9, x23, LSL #2]\n"
+      "fadd z26.s, z11.s, z4.s\n"
+      "fsub z15.s, z23.s, z30.s\n"
+      "fmla z13.s, z9.s, z2.s[1]\n"
+      "fsub z4.s, z4.s, z11.s\n"
+      "fmsb z30.s, p1/M, z16.s, z23.s\n"
+      "fmul z23.s, z8.s, z2.s[2]\n"
+      "fadd z18.s, z13.s, z15.s\n"
+      "st1w { z18.s }, p0, [x11, x21, LSL #2]\n"
+      "fneg z23.s, p1/M, z23.s\n"
+      "fsub z18.s, z15.s, z13.s\n"
+      "fadd z23.s, z23.s, z9.s\n"
+      "st1w { z18.s }, p0, [x9, x21, LSL #2]\n"
+      "fadd z18.s, z23.s, z30.s\n"
+      "fsub z15.s, z10.s, z14.s\n"
+      "fmul z13.s, z7.s, z2.s[1]\n"
+      "fsub z30.s, z30.s, z23.s\n"
+      "fmsb z14.s, p1/M, z16.s, z10.s\n"
+      "fmul z10.s, z7.s, z2.s[2]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "fmla z13.s, z31.s, z2.s[1]\n"
+      "fneg z10.s, p1/M, z10.s\n"
+      "fadd z10.s, z10.s, z31.s\n"
+      "fadd z17.s, z13.s, z15.s\n"
+      "st1w { z17.s }, p0, [x11, x19, LSL #2]\n"
+      "fsub z17.s, z15.s, z13.s\n"
+      "incb x11\n"
+      "st1w { z17.s }, p0, [x9, x19, LSL #2]\n"
+      "fadd z17.s, z10.s, z14.s\n"
+      "fsub z14.s, z14.s, z10.s\n"
+      "st1w { z22.s }, p0, [x15]\n"
+      "incb x9\n"
+      "st1w { z12.s }, p0, [x13]\n"
+      "st1w { z21.s }, p0, [x15, %x[output_col_1_stride], LSL #2]\n"
+      "st1w { z5.s }, p0, [x13, %x[output_col_1_stride], LSL #2]\n"
+      "st1w { z20.s }, p0, [x15, x25, LSL #2]\n"
+      "st1w { z24.s }, p0, [x13, x25, LSL #2]\n"
+      "st1w { z26.s }, p0, [x15, x23, LSL #2]\n"
+      "st1w { z4.s }, p0, [x13, x23, LSL #2]\n"
+      "st1w { z18.s }, p0, [x15, x21, LSL #2]\n"
+      "st1w { z30.s }, p0, [x13, x21, LSL #2]\n"
+      "st1w { z17.s }, p0, [x15, x19, LSL #2]\n"
+      "incb x15\n"
+      "st1w { z14.s }, p0, [x13, x19, LSL #2]\n"
+      "incb x13\n"
+      "ld1w { z23.s }, p0/Z, [x28]\n"
+      "ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n"
+      "fmul z13.s, z22.s, z2.s[1]\n"
+      "ld1w { z21.s }, p0/Z, [x28, x26, LSL #2]\n"
+      "ld1w { z20.s }, p0/Z, [x28, x24, LSL #2]\n"
+      "fneg z13.s, p1/M, z13.s\n"
+      "ld1w { z26.s }, p0/Z, [x28, x22, LSL #2]\n"
+      "fsub z15.s, z26.s, z21.s\n"
+      "fmad z23.s, p1/M, z16.s, z26.s\n"
+      "ld1w { z18.s }, p0/Z, [x28, x20, LSL #2]\n"
+      "fmla z13.s, z20.s, z2.s[1]\n"
+      "incb x28\n"
+      "fmls z23.s, z21.s, z2.s[3]\n"
+      "fsub z17.s, z15.s, z13.s\n"
+      "fadd z30.s, z13.s, z15.s\n"
+      "fmsb z21.s, p1/M, z16.s, z26.s\n"
+      "fmul z26.s, z22.s, z2.s[2]\n"
+      "fmad z22.s, p1/M, z16.s, z18.s\n"
+      "fmad z19.s, p1/M, z16.s, z23.s\n"
+      "fmad z28.s, p1/M, z16.s, z30.s\n"
+      "fneg z26.s, p1/M, z26.s\n"
+      "fadd z26.s, z26.s, z20.s\n"
+      "fmls z22.s, z20.s, z2.s[3]\n"
+      "fmls z19.s, z0.s, z2.s[3]\n"
+      "st1w { z19.s }, p0, [x27]\n"
+      "fadd z23.s, z26.s, z21.s\n"
+      "fsub z21.s, z21.s, z26.s\n"
+      "fmls z28.s, z27.s, z2.s[3]\n"
+      "fmad z8.s, p1/M, z16.s, z17.s\n"
+      "fmad z7.s, p1/M, z16.s, z22.s\n"
+      "fmad z3.s, p1/M, z16.s, z23.s\n"
+      "fmad z6.s, p1/M, z16.s, z21.s\n"
+      "fmls z8.s, z9.s, z2.s[3]\n"
+      "fmls z7.s, z31.s, z2.s[3]\n"
+      "fmls z3.s, z25.s, z2.s[3]\n"
+      "st1w { z3.s }, p0, [x27, %x[output_col_1_stride], LSL #2]\n"
+      "fmls z6.s, z1.s, z2.s[3]\n"
+      "st1w { z6.s }, p0, [x27, x25, LSL #2]\n"
+      "st1w { z28.s }, p0, [x27, x23, LSL #2]\n"
+      "st1w { z8.s }, p0, [x27, x21, LSL #2]\n"
+      "st1w { z7.s }, p0, [x27, x19, LSL #2]\n"
+      "incb x27\n"
+      "whilelt p0.s, XZR, %x[num_channels]\n"
+      "bne 1b\n"
+      "2:"  // channel_loop_end
+      ".inst 0xd503467f  // SMSTOP\n"
+      : [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" (output)
+      : [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 * (long) output_col_stride)
+      : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace input_transform
+}  // namespace winograd
+}  // namespace arm_conv
+
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp
index ec4e954..df63390 100644
--- a/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp
@@ -34,6 +34,9 @@
 
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+void sme_fp32_mla_6x6(unsigned int, const float *, size_t, size_t, float *, size_t);
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
 void sve_fp32_6x6(unsigned int, const float *, size_t, size_t, float *, size_t);
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
 void a64_fp32_6x6(unsigned int, const float *, size_t, size_t, float *, size_t);
@@ -48,6 +51,9 @@
 static const TransformImplementation<float> transforms_fp32[] = {
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME)
+  { IMPL(6, 6, sme_fp32_mla_6x6, Unpadded), MethodConstraints::RequiresSME },
+#endif  // defined(ARM_COMPUTE_ENABLE_SME)
   { IMPL(6, 6, sve_fp32_6x6, Unpadded), MethodConstraints::RequiresSVE },
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
   { IMPL(6, 6, a64_fp32_6x6, Unpadded) },
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
new file mode 100644
index 0000000..9d3c751
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
@@ -0,0 +1,892 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+
+#include <cstddef>
+
+namespace arm_conv {
+namespace winograd {
+namespace output_transform {
+
+void sme_fp32_mopa_4x4_3x3(
+  const unsigned int n_channels,
+  const float* inptr,
+  const size_t matrix_stride,
+  const float* bptr,
+  float* const output,
+  const size_t output_row_stride,
+  const size_t output_col_stride,
+  const float output_min,
+  const float output_max
+)
+{
+  // The assembler below uses the Kronecker product and the "vec trick" to
+  // implement the Winograd output transform (y = AT Y A) using the SME
+  // array.  This code REQUIRES that the vectors are 512 bits long (or
+  // longer, if we add some predication).
+  //
+  // The "vec trick" uses the identity $vec(AT Y A) = (AT (x) AT) vec(Y)$ to
+  // convert the chain of matrix multiplications into a matrix-vector
+  // product.  We then stack multiple channels of vec(Y) together to allow us
+  // to perform multiple channels of the transformation simultaneously.
+  //
+  // Since the complete matrix (AT (x) AT) is quite big [16 x 36], we compute
+  // it on the fly. To do so, we store two representations of the matrix AT.
+  // The first representation (the outer terms) contains, within each quad,
+  // four coefficients of the matrix AT.
+  const float outer_terms[32] = {
+     1, 1,  1, 1,
+     0, 1, -1, 2,
+     0, 1,  1, 4,
+     0, 1, -1, 8,
+    // The following rows are continuations of the first four rows, and each
+    // contains two columns of padding values which aren't used in the
+    // computation but are there to ensure that the coefficients end up in
+    // the right quads of the vector into which they're read.
+     1, 0,  0, 0,
+    -2, 0,  0, 0,
+     4, 0,  0, 0,
+    -8, 1,  0, 0
+  };
+  // This should be loaded completely into two Z registers.
+  //
+  // We can then use by-element FMLA to construct columns of (AT (x) AT) by
+  // multiplying elements of the outer terms against the following inner
+  // terms (again split into quads, but expected to be loaded replicated such
+  // that each of the six required Z registers contains a repeated quad of
+  // the values).
+  const float inner_terms[24] = {
+    1,  0, 0,  0,
+    1,  1, 1,  1,
+    1, -1, 1, -1,
+    1,  2, 4,  8,
+    1, -2, 4, -8,
+    0,  0, 0,  1
+  };
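+  // For illustration, the per-channel computation this kernel performs can be
+  // sketched in scalar code as below (reference only, not compiled; it assumes
+  // a column-major vec() for both the input tile Y and the output tile,
+  // ignores the bias and the activation clamping handled by the assembler,
+  // and uses the hypothetical buffer name vecY for vec(Y)):
+  //
+  //   // AT is the 4x6 output-transform matrix; its columns are the quads in
+  //   // inner_terms and its rows are split across the quads of outer_terms.
+  //   const float AT[4][6] = {
+  //     { 1, 1,  1, 1,  1, 0 },
+  //     { 0, 1, -1, 2, -2, 0 },
+  //     { 0, 1,  1, 4,  4, 0 },
+  //     { 0, 1, -1, 8, -8, 1 },
+  //   };
+  //   float vecY[36];    // vec(Y) for one channel, filled from the Winograd domain
+  //   float y[16] = {};  // vec(AT Y A), i.e. the 4x4 output tile
+  //   for (int i = 0; i < 16; i++)    // row of (AT (x) AT)
+  //   {
+  //     for (int j = 0; j < 36; j++)  // column of (AT (x) AT)
+  //     {
+  //       // Entry (i, j) of the Kronecker product is AT[i/4][j/6] * AT[i%4][j%6].
+  //       y[i] += AT[i / 4][j / 6] * AT[i % 4][j % 6] * vecY[j];
+  //     }
+  //   }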
+
+  struct Params
+  {
+    const float *outer_terms;
+    const float *inner_terms;
+    float act_min;
+    float act_max;
+
+    Params(const float *outer_terms,
+           const float *inner_terms,
+           float act_min,
+           float act_max)
+      : outer_terms(outer_terms), inner_terms(inner_terms),
+        act_min(act_min), act_max(act_max)
+    {
+    }
+  };
+
+  Params params(outer_terms, inner_terms, output_min, output_max);
+
+  __asm__ __volatile__(
+    "ldr x20, [%x[params], %[offsetof_Params_outer_terms]]\n"
+    ".inst 0xd503477f  // SMSTART ZA\n"
+    "ptrue p5.b\n"
+    "ld1rw { z12.s }, p5/Z, [%x[params], %[offsetof_Params_act_min]]\n"
+    "ld1rw { z10.s }, p5/Z, [%x[params], %[offsetof_Params_act_max]]\n"
+    "pfalse p8.b\n"
+    "ldr x19, [%x[params], %[offsetof_Params_inner_terms]]\n"
+    "ld1w { z6.s }, p5/Z, [x20]\n"
+    "ld1w { z7.s }, p5/Z, [x20, #1, MUL VL]\n"
+    "ld1rqw { z9.s }, p5/Z, [x19]\n"
+    "ld1rqw { z8.s }, p5/Z, [x19, #16]\n"
+    "ld1rqw { z15.s }, p5/Z, [x19, #32]\n"
+    "fmul z11.s, z9.s, z6.s[0]\n"
+    "fmul z5.s, z9.s, z6.s[1]\n"
+    "ld1rqw { z4.s }, p5/Z, [x19, #48]\n"
+    "ld1rqw { z3.s }, p5/Z, [x19, #64]\n"
+    "ld1rqw { z2.s }, p5/Z, [x19, #80]\n"
+    "cbz %x[bptr], 1f\n"
+    "ptrue p8.s\n"
+    "1:"  // Set bias predicate: Done
+    ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+    "fmov z1.s, #1.0\n"
+    "mov x25, #0x0\n"
+    "cntw x24\n"
+    "cntw x23, ALL, MUL #2\n"
+    "cntw x22, ALL, MUL #3\n"
+    "whilelt p4.s, x25, %x[n_channels]\n"
+    "whilelt p3.s, x24, %x[n_channels]\n"
+    "ld1w { z31.s }, p4/Z, [%x[inptr], x25, LSL #2]\n"
+    "ld1w { z30.s }, p3/Z, [%x[inptr], x24, LSL #2]\n"
+    "whilelt p2.s, x23, %x[n_channels]\n"
+    "whilelt p1.s, x22, %x[n_channels]\n"
+    "ld1w { z29.s }, p2/Z, [%x[inptr], x23, LSL #2]\n"
+    "add x21, %x[inptr], %x[matrix_stride], LSL #2\n"
+    "and p0.b, p5/Z, p8.b, p4.b\n"
+    "ld1w { z28.s }, p1/Z, [%x[inptr], x22, LSL #2]\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x25, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p3.b\n"
+    ".inst 0x8080b420  // fmopa za0.s, p5/M, p5/M, z1.s, z0.s\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x24, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p2.b\n"
+    ".inst 0x8080b421  // fmopa za1.s, p5/M, p5/M, z1.s, z0.s\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x23, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p1.b\n"
+    ".inst 0x8080b422  // fmopa za2.s, p5/M, p5/M, z1.s, z0.s\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x22, LSL #2]\n"
+    ".inst 0x8080b423  // fmopa za3.s, p5/M, p5/M, z1.s, z0.s\n"
+    "2:"  // Loop
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    "mov x14, #0xc\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "add x20, %x[output], %x[output_col_stride], LSL #2\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "add x19, %x[output], %x[output_row_stride], LSL #2\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z9.s, z6.s[2]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z9.s, z6.s[3]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z9.s, z7.s[0]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z9.s, z7.s[1]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z8.s, z6.s[0]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z8.s, z6.s[1]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z8.s, z6.s[2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z8.s, z6.s[3]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z8.s, z7.s[0]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z8.s, z7.s[1]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z15.s, z6.s[0]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z15.s, z6.s[1]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z15.s, z6.s[2]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z15.s, z6.s[3]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z15.s, z7.s[0]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z15.s, z7.s[1]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z4.s, z6.s[0]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z4.s, z6.s[1]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z4.s, z6.s[2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z4.s, z6.s[3]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z4.s, z7.s[0]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z4.s, z7.s[1]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z3.s, z6.s[0]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z3.s, z6.s[1]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z3.s, z6.s[2]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z3.s, z6.s[3]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z3.s, z7.s[0]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z3.s, z7.s[1]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z2.s, z6.s[0]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z2.s, z6.s[1]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z2.s, z6.s[2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z2.s, z6.s[3]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    ".inst 0x809fb560  // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    ".inst 0x809eb561  // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n"
+    ".inst 0x809db562  // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
+    ".inst 0x809cb563  // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
+    "fmul z11.s, z2.s, z7.s[0]\n"
+    ".inst 0x809bb4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n"
+    ".inst 0x809ab4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n"
+    ".inst 0x8099b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n"
+    ".inst 0x8098b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n"
+    "fmul z5.s, z2.s, z7.s[1]\n"
+    ".inst 0x8097b560  // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n"
+    ".inst 0x8096b561  // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n"
+    ".inst 0x8095b562  // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n"
+    ".inst 0x8094b563  // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n"
+    "fmul z11.s, z9.s, z6.s[0]\n"
+    ".inst 0x8093b4a0  // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n"
+    ".inst 0x8092b4a1  // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n"
+    ".inst 0x8091b4a2  // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n"
+    ".inst 0x8090b4a3  // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n"
+    "fmul z5.s, z9.s, z6.s[1]\n"
+    ".inst 0xc082741f  // mova z31.s, p5/M, za0h.s[XZR]\n"
+    ".inst 0xc082541c  // mova z28.s, p5/M, za0h.s[x14]\n"
+    "fmin z31.s, p5/M, z31.s, z10.s\n"
+    ".inst 0xc082743b  // mova z27.s, p5/M, za0h.s[XZR, #1]\n"
+    "fmin z28.s, p5/M, z28.s, z10.s\n"
+    ".inst 0xc0825438  // mova z24.s, p5/M, za0h.s[x14, #1]\n"
+    "fmin z27.s, p5/M, z27.s, z10.s\n"
+    "mov x13, #0x4\n"
+    "mov x12, #0x8\n"
+    ".inst 0xc082341e  // mova z30.s, p5/M, za0h.s[x13]\n"
+    "fmin z24.s, p5/M, z24.s, z10.s\n"
+    ".inst 0xc082141d  // mova z29.s, p5/M, za0h.s[x12]\n"
+    "fmax z31.s, p5/M, z31.s, z12.s\n"
+    "fmin z30.s, p5/M, z30.s, z10.s\n"
+    ".inst 0xc082343a  // mova z26.s, p5/M, za0h.s[x13, #1]\n"
+    "fmin z29.s, p5/M, z29.s, z10.s\n"
+    "fmax z28.s, p5/M, z28.s, z12.s\n"
+    ".inst 0xc0821439  // mova z25.s, p5/M, za0h.s[x12, #1]\n"
+    "fmax z27.s, p5/M, z27.s, z12.s\n"
+    "fmin z26.s, p5/M, z26.s, z10.s\n"
+    ".inst 0xc0827457  // mova z23.s, p5/M, za0h.s[XZR, #2]\n"
+    "fmin z25.s, p5/M, z25.s, z10.s\n"
+    "fmax z24.s, p5/M, z24.s, z12.s\n"
+    ".inst 0xc0823456  // mova z22.s, p5/M, za0h.s[x13, #2]\n"
+    "fmax z30.s, p5/M, z30.s, z12.s\n"
+    "fmin z23.s, p5/M, z23.s, z10.s\n"
+    ".inst 0xc0821455  // mova z21.s, p5/M, za0h.s[x12, #2]\n"
+    "fmax z29.s, p5/M, z29.s, z12.s\n"
+    "fmin z22.s, p5/M, z22.s, z10.s\n"
+    ".inst 0xc0825454  // mova z20.s, p5/M, za0h.s[x14, #2]\n"
+    "fmax z26.s, p5/M, z26.s, z12.s\n"
+    "fmin z21.s, p5/M, z21.s, z10.s\n"
+    ".inst 0xc0827473  // mova z19.s, p5/M, za0h.s[XZR, #3]\n"
+    "fmax z25.s, p5/M, z25.s, z12.s\n"
+    "fmin z20.s, p5/M, z20.s, z10.s\n"
+    ".inst 0xc0823472  // mova z18.s, p5/M, za0h.s[x13, #3]\n"
+    "fmax z23.s, p5/M, z23.s, z12.s\n"
+    "fmin z19.s, p5/M, z19.s, z10.s\n"
+    ".inst 0xc0821471  // mova z17.s, p5/M, za0h.s[x12, #3]\n"
+    "fmax z22.s, p5/M, z22.s, z12.s\n"
+    "fmin z18.s, p5/M, z18.s, z10.s\n"
+    ".inst 0xc0825470  // mova z16.s, p5/M, za0h.s[x14, #3]\n"
+    "fmax z21.s, p5/M, z21.s, z12.s\n"
+    "fmin z17.s, p5/M, z17.s, z10.s\n"
+    "fmax z20.s, p5/M, z20.s, z12.s\n"
+    "fmin z16.s, p5/M, z16.s, z10.s\n"
+    "st1w { z31.s }, p0, [%x[output], x25, LSL #2]\n"
+    "fmax z19.s, p5/M, z19.s, z12.s\n"
+    "st1w { z30.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z18.s, p5/M, z18.s, z12.s\n"
+    "st1w { z29.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z17.s, p5/M, z17.s, z12.s\n"
+    "st1w { z28.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "fmax z16.s, p5/M, z16.s, z12.s\n"
+    "st1w { z27.s }, p0, [x19, x25, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z26.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z25.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z24.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z23.s }, p0, [x19, x25, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z22.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z21.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z20.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z19.s }, p0, [x19, x25, LSL #2]\n"
+    "st1w { z18.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z17.s }, p0, [x20, x25, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z16.s }, p0, [x20, x25, LSL #2]\n"
+    "whilelt p0.s, x24, %x[n_channels]\n"
+    "b.none 3f\n"
+    ".inst 0xc082749f  // mova z31.s, p5/M, za1h.s[XZR]\n"
+    ".inst 0xc082349e  // mova z30.s, p5/M, za1h.s[x13]\n"
+    "fmin z31.s, p5/M, z31.s, z10.s\n"
+    ".inst 0xc082149d  // mova z29.s, p5/M, za1h.s[x12]\n"
+    "fmin z30.s, p5/M, z30.s, z10.s\n"
+    ".inst 0xc082549c  // mova z28.s, p5/M, za1h.s[x14]\n"
+    "fmin z29.s, p5/M, z29.s, z10.s\n"
+    ".inst 0xc08274bb  // mova z27.s, p5/M, za1h.s[XZR, #1]\n"
+    "fmin z28.s, p5/M, z28.s, z10.s\n"
+    ".inst 0xc08234ba  // mova z26.s, p5/M, za1h.s[x13, #1]\n"
+    "fmax z31.s, p5/M, z31.s, z12.s\n"
+    "fmin z27.s, p5/M, z27.s, z10.s\n"
+    ".inst 0xc08214b9  // mova z25.s, p5/M, za1h.s[x12, #1]\n"
+    "fmax z30.s, p5/M, z30.s, z12.s\n"
+    "fmin z26.s, p5/M, z26.s, z10.s\n"
+    ".inst 0xc08254b8  // mova z24.s, p5/M, za1h.s[x14, #1]\n"
+    "fmax z29.s, p5/M, z29.s, z12.s\n"
+    "fmin z25.s, p5/M, z25.s, z10.s\n"
+    ".inst 0xc08274d7  // mova z23.s, p5/M, za1h.s[XZR, #2]\n"
+    "fmax z28.s, p5/M, z28.s, z12.s\n"
+    "fmin z24.s, p5/M, z24.s, z10.s\n"
+    ".inst 0xc08234d6  // mova z22.s, p5/M, za1h.s[x13, #2]\n"
+    "fmax z27.s, p5/M, z27.s, z12.s\n"
+    "fmin z23.s, p5/M, z23.s, z10.s\n"
+    ".inst 0xc08214d5  // mova z21.s, p5/M, za1h.s[x12, #2]\n"
+    "fmax z26.s, p5/M, z26.s, z12.s\n"
+    "fmin z22.s, p5/M, z22.s, z10.s\n"
+    "add x20, %x[output], %x[output_col_stride], LSL #2\n"
+    ".inst 0xc08254d4  // mova z20.s, p5/M, za1h.s[x14, #2]\n"
+    "fmax z25.s, p5/M, z25.s, z12.s\n"
+    "fmin z21.s, p5/M, z21.s, z10.s\n"
+    "add x19, %x[output], %x[output_row_stride], LSL #2\n"
+    ".inst 0xc08274f3  // mova z19.s, p5/M, za1h.s[XZR, #3]\n"
+    "fmax z24.s, p5/M, z24.s, z12.s\n"
+    "fmin z20.s, p5/M, z20.s, z10.s\n"
+    ".inst 0xc08234f2  // mova z18.s, p5/M, za1h.s[x13, #3]\n"
+    "fmax z23.s, p5/M, z23.s, z12.s\n"
+    "fmin z19.s, p5/M, z19.s, z10.s\n"
+    ".inst 0xc08214f1  // mova z17.s, p5/M, za1h.s[x12, #3]\n"
+    "fmax z22.s, p5/M, z22.s, z12.s\n"
+    "fmin z18.s, p5/M, z18.s, z10.s\n"
+    ".inst 0xc08254f0  // mova z16.s, p5/M, za1h.s[x14, #3]\n"
+    "fmax z21.s, p5/M, z21.s, z12.s\n"
+    "fmin z17.s, p5/M, z17.s, z10.s\n"
+    "fmax z20.s, p5/M, z20.s, z12.s\n"
+    "fmin z16.s, p5/M, z16.s, z10.s\n"
+    "st1w { z31.s }, p0, [%x[output], x24, LSL #2]\n"
+    "fmax z19.s, p5/M, z19.s, z12.s\n"
+    "st1w { z30.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z18.s, p5/M, z18.s, z12.s\n"
+    "st1w { z29.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z17.s, p5/M, z17.s, z12.s\n"
+    "st1w { z28.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "fmax z16.s, p5/M, z16.s, z12.s\n"
+    "st1w { z27.s }, p0, [x19, x24, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z26.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z25.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z24.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z23.s }, p0, [x19, x24, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z22.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z21.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z20.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z19.s }, p0, [x19, x24, LSL #2]\n"
+    "st1w { z18.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z17.s }, p0, [x20, x24, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z16.s }, p0, [x20, x24, LSL #2]\n"
+    "whilelt p0.s, x23, %x[n_channels]\n"
+    "b.none 3f\n"
+    ".inst 0xc082751f  // mova z31.s, p5/M, za2h.s[XZR]\n"
+    ".inst 0xc082351e  // mova z30.s, p5/M, za2h.s[x13]\n"
+    "fmin z31.s, p5/M, z31.s, z10.s\n"
+    ".inst 0xc082151d  // mova z29.s, p5/M, za2h.s[x12]\n"
+    "fmin z30.s, p5/M, z30.s, z10.s\n"
+    ".inst 0xc082551c  // mova z28.s, p5/M, za2h.s[x14]\n"
+    "fmin z29.s, p5/M, z29.s, z10.s\n"
+    ".inst 0xc082753b  // mova z27.s, p5/M, za2h.s[XZR, #1]\n"
+    "fmin z28.s, p5/M, z28.s, z10.s\n"
+    ".inst 0xc082353a  // mova z26.s, p5/M, za2h.s[x13, #1]\n"
+    "fmax z31.s, p5/M, z31.s, z12.s\n"
+    "fmin z27.s, p5/M, z27.s, z10.s\n"
+    ".inst 0xc0821539  // mova z25.s, p5/M, za2h.s[x12, #1]\n"
+    "fmax z30.s, p5/M, z30.s, z12.s\n"
+    "fmin z26.s, p5/M, z26.s, z10.s\n"
+    ".inst 0xc0825538  // mova z24.s, p5/M, za2h.s[x14, #1]\n"
+    "fmax z29.s, p5/M, z29.s, z12.s\n"
+    "fmin z25.s, p5/M, z25.s, z10.s\n"
+    ".inst 0xc0827557  // mova z23.s, p5/M, za2h.s[XZR, #2]\n"
+    "fmax z28.s, p5/M, z28.s, z12.s\n"
+    "fmin z24.s, p5/M, z24.s, z10.s\n"
+    ".inst 0xc0823556  // mova z22.s, p5/M, za2h.s[x13, #2]\n"
+    "fmax z27.s, p5/M, z27.s, z12.s\n"
+    "fmin z23.s, p5/M, z23.s, z10.s\n"
+    ".inst 0xc0821555  // mova z21.s, p5/M, za2h.s[x12, #2]\n"
+    "fmax z26.s, p5/M, z26.s, z12.s\n"
+    "fmin z22.s, p5/M, z22.s, z10.s\n"
+    "add x20, %x[output], %x[output_col_stride], LSL #2\n"
+    ".inst 0xc0825554  // mova z20.s, p5/M, za2h.s[x14, #2]\n"
+    "fmax z25.s, p5/M, z25.s, z12.s\n"
+    "fmin z21.s, p5/M, z21.s, z10.s\n"
+    "add x19, %x[output], %x[output_row_stride], LSL #2\n"
+    ".inst 0xc0827573  // mova z19.s, p5/M, za2h.s[XZR, #3]\n"
+    "fmax z24.s, p5/M, z24.s, z12.s\n"
+    "fmin z20.s, p5/M, z20.s, z10.s\n"
+    ".inst 0xc0823572  // mova z18.s, p5/M, za2h.s[x13, #3]\n"
+    "fmax z23.s, p5/M, z23.s, z12.s\n"
+    "fmin z19.s, p5/M, z19.s, z10.s\n"
+    ".inst 0xc0821571  // mova z17.s, p5/M, za2h.s[x12, #3]\n"
+    "fmax z22.s, p5/M, z22.s, z12.s\n"
+    "fmin z18.s, p5/M, z18.s, z10.s\n"
+    ".inst 0xc0825570  // mova z16.s, p5/M, za2h.s[x14, #3]\n"
+    "fmax z21.s, p5/M, z21.s, z12.s\n"
+    "fmin z17.s, p5/M, z17.s, z10.s\n"
+    "fmax z20.s, p5/M, z20.s, z12.s\n"
+    "fmin z16.s, p5/M, z16.s, z10.s\n"
+    "st1w { z31.s }, p0, [%x[output], x23, LSL #2]\n"
+    "fmax z19.s, p5/M, z19.s, z12.s\n"
+    "st1w { z30.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z18.s, p5/M, z18.s, z12.s\n"
+    "st1w { z29.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z17.s, p5/M, z17.s, z12.s\n"
+    "st1w { z28.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "fmax z16.s, p5/M, z16.s, z12.s\n"
+    "st1w { z27.s }, p0, [x19, x23, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z26.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z25.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z24.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z23.s }, p0, [x19, x23, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z22.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z21.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z20.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z19.s }, p0, [x19, x23, LSL #2]\n"
+    "st1w { z18.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z17.s }, p0, [x20, x23, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z16.s }, p0, [x20, x23, LSL #2]\n"
+    "whilelt p0.s, x22, %x[n_channels]\n"
+    "b.none 3f\n"
+    "fmov z1.s, #1.0\n"
+    ".inst 0xc082759f  // mova z31.s, p5/M, za3h.s[XZR]\n"
+    ".inst 0xc082359e  // mova z30.s, p5/M, za3h.s[x13]\n"
+    "fmin z31.s, p5/M, z31.s, z10.s\n"
+    ".inst 0xc082159d  // mova z29.s, p5/M, za3h.s[x12]\n"
+    "fmin z30.s, p5/M, z30.s, z10.s\n"
+    ".inst 0xc082559c  // mova z28.s, p5/M, za3h.s[x14]\n"
+    "fmin z29.s, p5/M, z29.s, z10.s\n"
+    ".inst 0xc08275bb  // mova z27.s, p5/M, za3h.s[XZR, #1]\n"
+    "fmin z28.s, p5/M, z28.s, z10.s\n"
+    ".inst 0xc08235ba  // mova z26.s, p5/M, za3h.s[x13, #1]\n"
+    "fmax z31.s, p5/M, z31.s, z12.s\n"
+    "fmin z27.s, p5/M, z27.s, z10.s\n"
+    ".inst 0xc08215b9  // mova z25.s, p5/M, za3h.s[x12, #1]\n"
+    "fmax z30.s, p5/M, z30.s, z12.s\n"
+    "fmin z26.s, p5/M, z26.s, z10.s\n"
+    ".inst 0xc08255b8  // mova z24.s, p5/M, za3h.s[x14, #1]\n"
+    "fmax z29.s, p5/M, z29.s, z12.s\n"
+    "fmin z25.s, p5/M, z25.s, z10.s\n"
+    ".inst 0xc08275d7  // mova z23.s, p5/M, za3h.s[XZR, #2]\n"
+    "fmax z28.s, p5/M, z28.s, z12.s\n"
+    "fmin z24.s, p5/M, z24.s, z10.s\n"
+    ".inst 0xc08235d6  // mova z22.s, p5/M, za3h.s[x13, #2]\n"
+    "fmax z27.s, p5/M, z27.s, z12.s\n"
+    "fmin z23.s, p5/M, z23.s, z10.s\n"
+    ".inst 0xc08215d5  // mova z21.s, p5/M, za3h.s[x12, #2]\n"
+    "fmax z26.s, p5/M, z26.s, z12.s\n"
+    "fmin z22.s, p5/M, z22.s, z10.s\n"
+    ".inst 0xc08255d4  // mova z20.s, p5/M, za3h.s[x14, #2]\n"
+    "fmax z25.s, p5/M, z25.s, z12.s\n"
+    "fmin z21.s, p5/M, z21.s, z10.s\n"
+    "add x20, %x[output], %x[output_col_stride], LSL #2\n"
+    ".inst 0xc08275f3  // mova z19.s, p5/M, za3h.s[XZR, #3]\n"
+    "fmax z24.s, p5/M, z24.s, z12.s\n"
+    "fmin z20.s, p5/M, z20.s, z10.s\n"
+    "add x19, %x[output], %x[output_row_stride], LSL #2\n"
+    ".inst 0xc08235f2  // mova z18.s, p5/M, za3h.s[x13, #3]\n"
+    "fmax z23.s, p5/M, z23.s, z12.s\n"
+    "fmin z19.s, p5/M, z19.s, z10.s\n"
+    "incw x25, ALL, MUL #4\n"
+    ".inst 0xc08215f1  // mova z17.s, p5/M, za3h.s[x12, #3]\n"
+    "fmax z22.s, p5/M, z22.s, z12.s\n"
+    "fmin z18.s, p5/M, z18.s, z10.s\n"
+    "incw x24, ALL, MUL #4\n"
+    ".inst 0xc08255f0  // mova z16.s, p5/M, za3h.s[x14, #3]\n"
+    "fmax z21.s, p5/M, z21.s, z12.s\n"
+    "fmin z17.s, p5/M, z17.s, z10.s\n"
+    "incw x23, ALL, MUL #4\n"
+    ".inst 0xc00800ff  // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+    "fmax z20.s, p5/M, z20.s, z12.s\n"
+    "fmin z16.s, p5/M, z16.s, z10.s\n"
+    "add x21, %x[inptr], %x[matrix_stride], LSL #2\n"
+    "fmax z19.s, p5/M, z19.s, z12.s\n"
+    "st1w { z31.s }, p0, [%x[output], x22, LSL #2]\n"
+    "fmax z18.s, p5/M, z18.s, z12.s\n"
+    "st1w { z30.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z17.s, p5/M, z17.s, z12.s\n"
+    "st1w { z29.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "fmax z16.s, p5/M, z16.s, z12.s\n"
+    "st1w { z28.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z27.s }, p0, [x19, x22, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z26.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z25.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z24.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z23.s }, p0, [x19, x22, LSL #2]\n"
+    "add x19, x19, %x[output_row_stride], LSL #2\n"
+    "st1w { z22.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z21.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z20.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x19, %x[output_col_stride], LSL #2\n"
+    "st1w { z19.s }, p0, [x19, x22, LSL #2]\n"
+    "st1w { z18.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z17.s }, p0, [x20, x22, LSL #2]\n"
+    "add x20, x20, %x[output_col_stride], LSL #2\n"
+    "st1w { z16.s }, p0, [x20, x22, LSL #2]\n"
+    "incw x22, ALL, MUL #4\n"
+    "whilelt p1.s, x22, %x[n_channels]\n"
+    "ld1w { z28.s }, p1/Z, [%x[inptr], x22, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "whilelt p2.s, x23, %x[n_channels]\n"
+    "whilelt p3.s, x24, %x[n_channels]\n"
+    "ld1w { z30.s }, p3/Z, [%x[inptr], x24, LSL #2]\n"
+    "whilelt p4.s, x25, %x[n_channels]\n"
+    "ld1w { z31.s }, p4/Z, [%x[inptr], x25, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p4.b\n"
+    "ld1w { z29.s }, p2/Z, [%x[inptr], x23, LSL #2]\n"
+    "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "add x21, x21, %x[matrix_stride], LSL #2\n"
+    "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n"
+    "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n"
+    "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x25, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p3.b\n"
+    ".inst 0x8080b420  // fmopa za0.s, p5/M, p5/M, z1.s, z0.s\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x24, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p2.b\n"
+    ".inst 0x8080b421  // fmopa za1.s, p5/M, p5/M, z1.s, z0.s\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x23, LSL #2]\n"
+    "and p0.b, p5/Z, p8.b, p1.b\n"
+    ".inst 0x8080b422  // fmopa za2.s, p5/M, p5/M, z1.s, z0.s\n"
+    "ld1w { z0.s }, p0/Z, [%x[bptr], x22, LSL #2]\n"
+    ".inst 0x8080b423  // fmopa za3.s, p5/M, p5/M, z1.s, z0.s\n"
+    "b.any 2b\n"
+    "3:"  // End
+    ".inst 0xd503467f  // SMSTOP\n"
+    :
+    : [bptr] "r" (bptr), [inptr] "r" (inptr), [matrix_stride] "r" (matrix_stride), [n_channels] "r" (n_channels), [offsetof_Params_act_max] "I" (offsetof(Params, act_max)), [offsetof_Params_act_min] "I" (offsetof(Params, act_min)), [offsetof_Params_inner_terms] "I" (offsetof(Params, inner_terms)), [offsetof_Params_outer_terms] "I" (offsetof(Params, outer_terms)), [output] "r" (output), [output_col_stride] "r" (output_col_stride), [output_row_stride] "r" (output_row_stride), [params] "r" (&params)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p8", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace output_transform
+}  // namespace winograd
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp
index 73abe8b..a221aee 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp
@@ -29,6 +29,11 @@
 namespace winograd {
 namespace output_transform {
 
+#if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+void sme_fp32_mopa_4x4_3x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float);
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+#endif  // defined(__aarch64__)
 void arm_fp32_4x4_3x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float);
 void arm_fp32_2x2_3x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float);
 void arm_fp32_2x2_5x5(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float);
@@ -44,6 +49,9 @@
 
 static const TransformImplementation<float> transforms_fp32[] = {
 #if defined(__aarch64__)
+#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
+  { IMPL(4, 4, 3, 3, sme_fp32_mopa_4x4_3x3, Unpadded), MethodConstraints::RequiresSME },
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SME)
 #endif  // defined(__aarch64__)
   { IMPL(4, 4, 3, 3, arm_fp32_4x4_3x3, Unpadded), MethodConstraints::LargerShape },
   { IMPL(2, 2, 3, 3, arm_fp32_2x2_3x3, Unpadded) },