Integrate improved pooling layer on NEON

Resolves COMPMID-4035

Change-Id: I559f8c4208fba9193dfe5012f03ddaf26c746215
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4855
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..178db4a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+// Kernel descriptor: FP16 NHWC average pooling, 3x3 window, stride 1,
+// producing a 2x2 output tile per invocation (depth-first over channels).
+struct a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst
+{
+  // Element type read from the input and written to the output.
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  // Signature of the assembly implementation:
+  // (n_channels, inptrs, outptrs, exclude_padding,
+  //  pad_left, pad_top, pad_right, pad_bottom).
+  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  // Pooling window geometry.
+  constexpr static unsigned int pool_rows(void) { return 3; }
+  constexpr static unsigned int pool_cols(void) { return 3; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  // Output tile computed per call.
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
+
+  // CPUInfo is unused: this kernel has no CPU-specific variants to select.
+  a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..f11bb68
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+// 3x3 stride-1 average pooling over FP16 NHWC data, producing a 2x2 output
+// tile per call (depth-first over channels).  The input pointer array covers
+// the 4x4 patch of input cells required by the four overlapping 3x3 windows
+// (sixteen pointers are loaded by the assembly below).
+void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  // Argument block read by the assembly via offsetof(); field layout must
+  // stay in sync with the %[offsetof_*] operands below.
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const __fp16 *const *const inptrs;
+    __fp16 *const *const outptrs;
+    // One reciprocal divisor per output element (2x2); loaded as d8 and
+    // applied lane-wise via fmul v.., v8.h[k].
+    __fp16 rescale_vals[4];
+
+    KernelArgs(
+      unsigned int channels,
+      const __fp16 *const *input_ptrs,
+      __fp16 *const * output_ptrs,
+      bool exclude_padding, unsigned int pad_left, unsigned int pad_top, unsigned int pad_right, unsigned int pad_bottom
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+      // For each of the four output cells, count how many of its 3x3 window
+      // cells fall inside the valid (unpadded) region of the 4x4 input patch.
+      for (unsigned int i = 0; i < 2; i++)
+      {
+        const int start_i = 1*i - static_cast<int>(pad_top);
+        const int end_i = std::min<int>(start_i + 3, 4 - pad_top - pad_bottom);
+        const int valid_rows = end_i - std::max<int>(0, start_i);
+
+        for (unsigned int j = 0; j < 2; j++)
+        {
+          const int start_j = 1*j - static_cast<int>(pad_left);
+          const int end_j = std::min<int>(start_j + 3, 4 - pad_left - pad_right);
+          const int valid_cols = end_j - std::max<int>(0, start_j);
+
+          // Divide by the number of valid cells when padding is excluded,
+          // otherwise by the full window size (9).
+          rescale_vals[i*2 + j] = static_cast<__fp16>(1.0f / static_cast<float>(
+            exclude_padding ? valid_rows * valid_cols : 9
+          ));
+        }
+      }
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  // The vector loop handles 8 channels per iteration (one 128-bit q register
+  // of FP16 lanes, software-pipelined: loads for the next iteration are
+  // interleaved with the adds of the current one); the "Oddments" path
+  // handles remaining channels one at a time with scalar h-register
+  // loads/stores.
+  __asm__ __volatile__(
+    "ldr x4, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x5, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x6, #0x0\n"
+    "ldr d8, [%x[args], %[offsetof_rescale]]\n"
+    "ldp x7, x8, [x19, #0x0]\n"
+    "cmp x4, #0x8\n"
+    "ldp x17, x16, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x15, x14, [x19, #0x0]\n"
+    "ldp x13, x12, [x19, #0x10]\n"
+    "ldp x11, x10, [x19, #0x20]\n"
+    "ldp x9, x28, [x19, #0x30]\n"
+    "ldp x27, x26, [x19, #0x40]\n"
+    "ldp x25, x24, [x19, #0x50]\n"
+    "ldp x23, x22, [x19, #0x60]\n"
+    "ldp x21, x20, [x19, #0x70]\n"
+    "blt 3f\n"
+    "lsr x19, x4, #0x3\n"
+    "sub x4, x4, x19, LSL #3\n"
+    "ldr q7, [x10, x5]\n"
+    "ldr q6, [x9, x5]\n"
+    "ldr q5, [x26, x5]\n"
+    "ldr q4, [x25, x5]\n"
+    "ldr q3, [x14, x5]\n"
+    "ldr q2, [x13, x5]\n"
+    "ldr q1, [x11, x5]\n"
+    "ldr q0, [x27, x5]\n"
+    "ldr q31, [x28, x5]\n"
+    "ldr q30, [x24, x5]\n"
+    "ldr q29, [x22, x5]\n"
+    "ldr q28, [x21, x5]\n"
+    "ldr q27, [x15, x5]\n"
+    "ldr q26, [x12, x5]\n"
+    "ldr q25, [x23, x5]\n"
+    "ldr q24, [x20, x5]\n"
+    "add x5, x5, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "beq 2f\n"
+    "1:"  // Vector: Loop
+    "fadd v17.8h, v7.8h, v6.8h\n"
+    "ldr q7, [x10, x5]\n"
+    "fadd v16.8h, v5.8h, v4.8h\n"
+    "ldr q6, [x9, x5]\n"
+    "fadd v18.8h, v3.8h, v2.8h\n"
+    "ldr q5, [x26, x5]\n"
+    "fadd v23.8h, v1.8h, v0.8h\n"
+    "ldr q4, [x25, x5]\n"
+    "fadd v17.8h, v17.8h, v16.8h\n"
+    "ldr q3, [x14, x5]\n"
+    "fadd v22.8h, v31.8h, v30.8h\n"
+    "ldr q2, [x13, x5]\n"
+    "fadd v16.8h, v29.8h, v28.8h\n"
+    "ldr q1, [x11, x5]\n"
+    "fadd v21.8h, v18.8h, v17.8h\n"
+    "ldr q0, [x27, x5]\n"
+    "fadd v19.8h, v27.8h, v23.8h\n"
+    "ldr q31, [x28, x5]\n"
+    "fadd v20.8h, v16.8h, v17.8h\n"
+    "ldr q30, [x24, x5]\n"
+    "fadd v18.8h, v26.8h, v22.8h\n"
+    "ldr q29, [x22, x5]\n"
+    "fadd v17.8h, v25.8h, v23.8h\n"
+    "ldr q28, [x21, x5]\n"
+    "fadd v16.8h, v24.8h, v22.8h\n"
+    "ldr q27, [x15, x5]\n"
+    "fadd v19.8h, v19.8h, v21.8h\n"
+    "ldr q26, [x12, x5]\n"
+    "fadd v18.8h, v21.8h, v18.8h\n"
+    "ldr q25, [x23, x5]\n"
+    "fadd v17.8h, v17.8h, v20.8h\n"
+    "ldr q24, [x20, x5]\n"
+    "fadd v16.8h, v20.8h, v16.8h\n"
+    "add x5, x5, #0x10\n"
+    "fmul v19.8h, v19.8h, v8.h[0]\n"
+    "subs x19, x19, #0x1\n"
+    "fmul v18.8h, v18.8h, v8.h[1]\n"
+    "str q19, [x7, x6]\n"
+    "fmul v17.8h, v17.8h, v8.h[2]\n"
+    "fmul v16.8h, v16.8h, v8.h[3]\n"
+    "str q18, [x8, x6]\n"
+    "str q17, [x17, x6]\n"
+    "str q16, [x16, x6]\n"
+    "add x6, x6, #0x10\n"
+    "bgt 1b\n"
+    "2:"  // Vector: Tail
+    "fadd v17.8h, v7.8h, v6.8h\n"
+    "fadd v16.8h, v5.8h, v4.8h\n"
+    "fadd v18.8h, v3.8h, v2.8h\n"
+    "fadd v23.8h, v1.8h, v0.8h\n"
+    "fadd v17.8h, v17.8h, v16.8h\n"
+    "fadd v22.8h, v31.8h, v30.8h\n"
+    "fadd v16.8h, v29.8h, v28.8h\n"
+    "fadd v21.8h, v18.8h, v17.8h\n"
+    "fadd v19.8h, v27.8h, v23.8h\n"
+    "fadd v20.8h, v16.8h, v17.8h\n"
+    "fadd v18.8h, v26.8h, v22.8h\n"
+    "fadd v17.8h, v25.8h, v23.8h\n"
+    "fadd v16.8h, v24.8h, v22.8h\n"
+    "fadd v19.8h, v19.8h, v21.8h\n"
+    "fadd v18.8h, v21.8h, v18.8h\n"
+    "fadd v17.8h, v17.8h, v20.8h\n"
+    "fadd v16.8h, v20.8h, v16.8h\n"
+    "fmul v19.8h, v19.8h, v8.h[0]\n"
+    "str q19, [x7, x6]\n"
+    "fmul v18.8h, v18.8h, v8.h[1]\n"
+    "fmul v17.8h, v17.8h, v8.h[2]\n"
+    "str q18, [x8, x6]\n"
+    "fmul v16.8h, v16.8h, v8.h[3]\n"
+    "str q17, [x17, x6]\n"
+    "str q16, [x16, x6]\n"
+    "add x6, x6, #0x10\n"
+    "cbz x4, 4f\n"
+    "3:"  // Oddments
+    "ldr h7, [x10, x5]\n"
+    "ldr h6, [x9, x5]\n"
+    "fadd v17.8h, v7.8h, v6.8h\n"
+    "ldr h5, [x26, x5]\n"
+    "ldr h4, [x25, x5]\n"
+    "fadd v16.8h, v5.8h, v4.8h\n"
+    "ldr h3, [x14, x5]\n"
+    "ldr h2, [x13, x5]\n"
+    "fadd v17.8h, v17.8h, v16.8h\n"
+    "ldr h1, [x11, x5]\n"
+    "ldr h0, [x27, x5]\n"
+    "fadd v18.8h, v3.8h, v2.8h\n"
+    "ldr h31, [x28, x5]\n"
+    "ldr h30, [x24, x5]\n"
+    "fadd v23.8h, v1.8h, v0.8h\n"
+    "ldr h29, [x22, x5]\n"
+    "fadd v21.8h, v18.8h, v17.8h\n"
+    "ldr h28, [x21, x5]\n"
+    "ldr h27, [x15, x5]\n"
+    "fadd v22.8h, v31.8h, v30.8h\n"
+    "ldr h26, [x12, x5]\n"
+    "fadd v16.8h, v29.8h, v28.8h\n"
+    "ldr h25, [x23, x5]\n"
+    "fadd v19.8h, v27.8h, v23.8h\n"
+    "ldr h24, [x20, x5]\n"
+    "fadd v18.8h, v26.8h, v22.8h\n"
+    "add x5, x5, #0x2\n"
+    "subs x4, x4, #0x1\n"
+    "fadd v20.8h, v16.8h, v17.8h\n"
+    "fadd v19.8h, v19.8h, v21.8h\n"
+    "fadd v18.8h, v21.8h, v18.8h\n"
+    "fadd v17.8h, v25.8h, v23.8h\n"
+    "fadd v16.8h, v24.8h, v22.8h\n"
+    "fmul v19.8h, v19.8h, v8.h[0]\n"
+    "str h19, [x7, x6]\n"
+    "fadd v17.8h, v17.8h, v20.8h\n"
+    "fadd v16.8h, v20.8h, v16.8h\n"
+    "fmul v18.8h, v18.8h, v8.h[1]\n"
+    "str h18, [x8, x6]\n"
+    "fmul v17.8h, v17.8h, v8.h[2]\n"
+    "fmul v16.8h, v16.8h, v8.h[3]\n"
+    "str h17, [x17, x6]\n"
+    "str h16, [x16, x6]\n"
+    "add x6, x6, #0x2\n"
+    "bgt 3b\n"
+    "4:"  // End
+
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..7bf1f43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp16_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+// Kernel descriptor: generic FP16 NHWC average pooling (arbitrary window
+// size), depth-first over channels.
+struct a64_fp16_nhwc_avg_generic_depthfirst
+{
+  // Element type read from the input and written to the output.
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  // Signature of the assembly implementation:
+  // (window_cells, n_valid_cells, n_channels, inptrs, outptr).
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = a64_fp16_nhwc_avg_generic_depthfirst_impl;
+
+  // CPUInfo is unused: this kernel has no CPU-specific variants to select.
+  a64_fp16_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..420616b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+
+// Generic FP16 NHWC average pooling: sums n_valid_cells input cells per
+// channel and multiplies by 1/window_cells.  Channels are processed 32 at a
+// time (four 8-lane FP16 vectors), then 8 at a time, with a final "Oddments"
+// path that uses bit-tested partial loads/stores for any remainder below 8.
+// Within each channel pass, input cells are consumed four at a time with a
+// single-cell loop for the n_valid_cells % 4 leftovers.
+void a64_fp16_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *outptr
+)
+{
+  // Reciprocal of the full window size; broadcast into v7 by the ld1r below
+  // and applied to every accumulator before storing.
+  const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
+
+  __asm__ __volatile__(
+    "ld1r { v7.8h }, [%x[rescale_ptr]]\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x20\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v5.16b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v4.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd v22.8h, v30.8h, v22.8h\n"
+    "add x19, x19, #0x20\n"
+    "fadd v18.8h, v29.8h, v28.8h\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v21.8h, v27.8h, v21.8h\n"
+    "fadd v17.8h, v26.8h, v17.8h\n"
+    "ldr q1, [x22, x28]\n"
+    "fadd v20.8h, v25.8h, v20.8h\n"
+    "ldr q0, [x21, x28]\n"
+    "fadd v16.8h, v24.8h, v16.8h\n"
+    "ldr q31, [x20, x28]\n"
+    "fadd v19.8h, v23.8h, v19.8h\n"
+    "ldr q30, [x23, x27]\n"
+    "fadd v18.8h, v22.8h, v18.8h\n"
+    "ldr q22, [x22, x27]\n"
+    "fadd v17.8h, v21.8h, v17.8h\n"
+    "ldr q29, [x21, x27]\n"
+    "fadd v16.8h, v20.8h, v16.8h\n"
+    "ldr q28, [x20, x27]\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "ldr q27, [x23, x26]\n"
+    "fadd v5.8h, v5.8h, v18.8h\n"
+    "ldr q21, [x22, x26]\n"
+    "fadd v4.8h, v4.8h, v17.8h\n"
+    "ldr q26, [x21, x26]\n"
+    "fadd v3.8h, v3.8h, v16.8h\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
+    "fadd v22.8h, v30.8h, v22.8h\n"
+    "fadd v18.8h, v29.8h, v28.8h\n"
+    "fadd v21.8h, v27.8h, v21.8h\n"
+    "fadd v17.8h, v26.8h, v17.8h\n"
+    "fadd v20.8h, v25.8h, v20.8h\n"
+    "fadd v16.8h, v24.8h, v16.8h\n"
+    "fadd v19.8h, v23.8h, v19.8h\n"
+    "fadd v18.8h, v22.8h, v18.8h\n"
+    "fadd v17.8h, v21.8h, v17.8h\n"
+    "fadd v16.8h, v20.8h, v16.8h\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "fadd v5.8h, v5.8h, v18.8h\n"
+    "fadd v4.8h, v4.8h, v17.8h\n"
+    "fadd v3.8h, v3.8h, v16.8h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v6.8h, v6.8h, v2.8h\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "fadd v5.8h, v5.8h, v30.8h\n"
+    "ldr q25, [x23, x25]\n"
+    "fadd v4.8h, v4.8h, v27.8h\n"
+    "fadd v3.8h, v3.8h, v25.8h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "fmul v6.8h, v6.8h, v7.8h\n"
+    "sub %x[n_channels], %x[n_channels], #0x20\n"
+    "fmul v5.8h, v5.8h, v7.8h\n"
+    "cmp %x[n_channels], #0x20\n"
+    "fmul v4.8h, v4.8h, v7.8h\n"
+    "str q6, [%x[outptr], x28]\n"
+    "fmul v3.8h, v3.8h, v7.8h\n"
+    "str q5, [%x[outptr], x27]\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 31f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x8\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd v19.8h, v23.8h, v19.8h\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
+    "fadd v19.8h, v23.8h, v19.8h\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v6.8h, v6.8h, v2.8h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "fmul v6.8h, v6.8h, v7.8h\n"
+    "sub %x[n_channels], %x[n_channels], #0x8\n"
+    "cmp %x[n_channels], #0x8\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 31f\n"
+    "14:"  // Oddments
+    "movi v6.16b, #0x0\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 20f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #0, 19f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
+    "b 19f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 19f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
+    "b 19f\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "tbz %x[n_channels], #0, 19f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
+    "b 19f\n"
+    "18:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 19f\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
+    "19:"  // Oddments: 4 inputs loop: Load: Bit 2: End
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
+    "fadd v19.8h, v23.8h, v19.8h\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "bgt 15b\n"
+    "20:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 26f\n"
+    "21:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #2, 23f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #1, 22f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "tbz %x[n_channels], #0, 25f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "b 25f\n"
+    "22:"  // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 25f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "b 25f\n"
+    "23:"  // Oddments: Single input loop: Load: Bit 2: Unset
+    "tbz %x[n_channels], #1, 24f\n"
+    "ldr s2, [x23], #0x4\n"
+    "tbz %x[n_channels], #0, 25f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "b 25f\n"
+    "24:"  // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 25f\n"
+    "ldr h2, [x23], #0x2\n"
+    "25:"  // Oddments: Single input loop: Load: Bit 2: End
+    "fadd v6.8h, v6.8h, v2.8h\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 21b\n"
+    "26:"  // Oddments: Single input loop: End
+    "fmul v6.8h, v6.8h, v7.8h\n"
+    "tbz %x[n_channels], #2, 28f\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #1, 27f\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
+    "b 30f\n"
+    "27:"  // Oddments: Store: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
+    "b 30f\n"
+    "28:"  // Oddments: Store: Bit 2: Unset
+    "tbz %x[n_channels], #1, 29f\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
+    "b 30f\n"
+    "29:"  // Oddments: Store: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
+    "30:"  // Oddments: Store: Bit 2: End
+
+    "31:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..9950bb8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+// Kernel descriptor: FP16 NHWC max pooling, 2x2 window, stride 1,
+// producing a 2x2 output tile per invocation (depth-first over channels).
+struct a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  // Element type read from the input and written to the output.
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  // Signature of the assembly implementation:
+  // (n_channels, inptrs, outptrs, exclude_padding,
+  //  pad_left, pad_top, pad_right, pad_bottom).
+  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  // Pooling window geometry.
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  // Output tile computed per call.
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  // CPUInfo is unused: this kernel has no CPU-specific variants to select.
+  a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..6e69ca0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const __fp16 *const *const inptrs;
+    __fp16 *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const __fp16 *const *input_ptrs,
+      __fp16 *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldp x12, x11, [x19, #0x0]\n"
+    "cmp x15, #0x8\n"
+    "ldp x10, x9, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x28, x27, [x19, #0x0]\n"
+    "ldp x26, x25, [x19, #0x10]\n"
+    "ldp x24, x23, [x19, #0x20]\n"
+    "ldp x22, x21, [x19, #0x30]\n"
+    "ldr x20, [x19, #0x40]\n"
+    "blt 3f\n"
+    "lsr x19, x15, #0x3\n"
+    "sub x15, x15, x19, LSL #3\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
+    "ldr q25, [x23, x14]\n"
+    "ldr q24, [x26, x14]\n"
+    "ldr q23, [x22, x14]\n"
+    "ldr q22, [x20, x14]\n"
+    "add x14, x14, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "beq 2f\n"
+    "1:"  // Vector: Loop
+    "fmax v21.8h, v30.8h, v29.8h\n"
+    "ldr q30, [x27, x14]\n"
+    "fmax v20.8h, v29.8h, v28.8h\n"
+    "ldr q29, [x24, x14]\n"
+    "fmax v19.8h, v27.8h, v26.8h\n"
+    "ldr q28, [x21, x14]\n"
+    "fmax v18.8h, v25.8h, v24.8h\n"
+    "ldr q26, [x28, x14]\n"
+    "fmax v17.8h, v23.8h, v27.8h\n"
+    "ldr q27, [x25, x14]\n"
+    "fmax v16.8h, v25.8h, v22.8h\n"
+    "ldr q25, [x23, x14]\n"
+    "fmax v19.8h, v21.8h, v19.8h\n"
+    "ldr q24, [x26, x14]\n"
+    "fmax v18.8h, v21.8h, v18.8h\n"
+    "ldr q23, [x22, x14]\n"
+    "fmax v17.8h, v20.8h, v17.8h\n"
+    "ldr q22, [x20, x14]\n"
+    "fmax v16.8h, v20.8h, v16.8h\n"
+    "add x14, x14, #0x10\n"
+    "str q19, [x12, x13]\n"
+    "str q18, [x11, x13]\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "bgt 1b\n"
+    "2:"  // Vector: Tail
+    "fmax v21.8h, v30.8h, v29.8h\n"
+    "fmax v20.8h, v29.8h, v28.8h\n"
+    "fmax v19.8h, v27.8h, v26.8h\n"
+    "fmax v18.8h, v25.8h, v24.8h\n"
+    "fmax v17.8h, v23.8h, v27.8h\n"
+    "fmax v16.8h, v25.8h, v22.8h\n"
+    "fmax v19.8h, v21.8h, v19.8h\n"
+    "str q19, [x12, x13]\n"
+    "fmax v18.8h, v21.8h, v18.8h\n"
+    "fmax v17.8h, v20.8h, v17.8h\n"
+    "str q18, [x11, x13]\n"
+    "fmax v16.8h, v20.8h, v16.8h\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "cbz x15, 4f\n"
+    "3:"  // Oddments
+    "ldr h30, [x27, x14]\n"
+    "ldr h29, [x24, x14]\n"
+    "fmax v21.8h, v30.8h, v29.8h\n"
+    "ldr h28, [x21, x14]\n"
+    "ldr h27, [x25, x14]\n"
+    "fmax v20.8h, v29.8h, v28.8h\n"
+    "ldr h26, [x28, x14]\n"
+    "ldr h25, [x23, x14]\n"
+    "fmax v19.8h, v27.8h, v26.8h\n"
+    "ldr h24, [x26, x14]\n"
+    "ldr h23, [x22, x14]\n"
+    "fmax v19.8h, v21.8h, v19.8h\n"
+    "ldr h22, [x20, x14]\n"
+    "add x14, x14, #0x2\n"
+    "fmax v18.8h, v25.8h, v24.8h\n"
+    "subs x15, x15, #0x1\n"
+    "fmax v17.8h, v23.8h, v27.8h\n"
+    "str h19, [x12, x13]\n"
+    "fmax v16.8h, v25.8h, v22.8h\n"
+    "fmax v18.8h, v21.8h, v18.8h\n"
+    "str h18, [x11, x13]\n"
+    "fmax v17.8h, v20.8h, v17.8h\n"
+    "fmax v16.8h, v20.8h, v16.8h\n"
+    "str h17, [x10, x13]\n"
+    "str h16, [x9, x13]\n"
+    "add x13, x13, #0x2\n"
+    "bgt 3b\n"
+    "4:"  // End
+
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..c903785
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp16_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+struct a64_fp16_nhwc_max_generic_depthfirst
+{
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = a64_fp16_nhwc_max_generic_depthfirst_impl;
+
+  a64_fp16_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..9901b20
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void a64_fp16_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *outptr
+)
+{
+  __asm__ __volatile__(
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x20\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov w20, #0xfc00\n"
+    "dup v6.8h, w20\n"
+    "mov x19, %x[inptrs]\n"
+    "dup v5.8h, w20\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "dup v4.8h, w20\n"
+    "dup v3.8h, w20\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax v22.8h, v30.8h, v22.8h\n"
+    "add x19, x19, #0x20\n"
+    "fmax v18.8h, v29.8h, v28.8h\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v21.8h, v27.8h, v21.8h\n"
+    "fmax v17.8h, v26.8h, v17.8h\n"
+    "ldr q1, [x22, x28]\n"
+    "fmax v20.8h, v25.8h, v20.8h\n"
+    "ldr q0, [x21, x28]\n"
+    "fmax v16.8h, v24.8h, v16.8h\n"
+    "ldr q31, [x20, x28]\n"
+    "fmax v19.8h, v23.8h, v19.8h\n"
+    "ldr q30, [x23, x27]\n"
+    "fmax v18.8h, v22.8h, v18.8h\n"
+    "ldr q22, [x22, x27]\n"
+    "fmax v17.8h, v21.8h, v17.8h\n"
+    "ldr q29, [x21, x27]\n"
+    "fmax v16.8h, v20.8h, v16.8h\n"
+    "ldr q28, [x20, x27]\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "ldr q27, [x23, x26]\n"
+    "fmax v5.8h, v5.8h, v18.8h\n"
+    "ldr q21, [x22, x26]\n"
+    "fmax v4.8h, v4.8h, v17.8h\n"
+    "ldr q26, [x21, x26]\n"
+    "fmax v3.8h, v3.8h, v16.8h\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
+    "fmax v22.8h, v30.8h, v22.8h\n"
+    "fmax v18.8h, v29.8h, v28.8h\n"
+    "fmax v21.8h, v27.8h, v21.8h\n"
+    "fmax v17.8h, v26.8h, v17.8h\n"
+    "fmax v20.8h, v25.8h, v20.8h\n"
+    "fmax v16.8h, v24.8h, v16.8h\n"
+    "fmax v19.8h, v23.8h, v19.8h\n"
+    "fmax v18.8h, v22.8h, v18.8h\n"
+    "fmax v17.8h, v21.8h, v17.8h\n"
+    "fmax v16.8h, v20.8h, v16.8h\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "fmax v5.8h, v5.8h, v18.8h\n"
+    "fmax v4.8h, v4.8h, v17.8h\n"
+    "fmax v3.8h, v3.8h, v16.8h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v6.8h, v6.8h, v2.8h\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "fmax v5.8h, v5.8h, v30.8h\n"
+    "ldr q25, [x23, x25]\n"
+    "fmax v4.8h, v4.8h, v27.8h\n"
+    "fmax v3.8h, v3.8h, v25.8h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "str q4, [%x[outptr], x26]\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "sub %x[n_channels], %x[n_channels], #0x20\n"
+    "cmp %x[n_channels], #0x20\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 31f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x8\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov w19, #0xfc00\n"
+    "dup v6.8h, w19\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax v19.8h, v23.8h, v19.8h\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
+    "fmax v19.8h, v23.8h, v19.8h\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v6.8h, v6.8h, v2.8h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "sub %x[n_channels], %x[n_channels], #0x8\n"
+    "cmp %x[n_channels], #0x8\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 31f\n"
+    "14:"  // Oddments
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov w19, #0xfc00\n"
+    "dup v6.8h, w19\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 20f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #0, 19f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
+    "b 19f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 19f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
+    "b 19f\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "tbz %x[n_channels], #0, 19f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
+    "b 19f\n"
+    "18:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 19f\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
+    "19:"  // Oddments: 4 inputs loop: Load: Bit 2: End
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
+    "fmax v19.8h, v23.8h, v19.8h\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "bgt 15b\n"
+    "20:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 26f\n"
+    "21:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #2, 23f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #1, 22f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "tbz %x[n_channels], #0, 25f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "b 25f\n"
+    "22:"  // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 25f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "b 25f\n"
+    "23:"  // Oddments: Single input loop: Load: Bit 2: Unset
+    "tbz %x[n_channels], #1, 24f\n"
+    "ldr s2, [x23], #0x4\n"
+    "tbz %x[n_channels], #0, 25f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "b 25f\n"
+    "24:"  // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 25f\n"
+    "ldr h2, [x23], #0x2\n"
+    "25:"  // Oddments: Single input loop: Load: Bit 2: End
+    "fmax v6.8h, v6.8h, v2.8h\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 21b\n"
+    "26:"  // Oddments: Single input loop: End
+    "tbz %x[n_channels], #2, 28f\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #1, 27f\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
+    "b 30f\n"
+    "27:"  // Oddments: Store: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
+    "b 30f\n"
+    "28:"  // Oddments: Store: Bit 2: Unset
+    "tbz %x[n_channels], #1, 29f\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
+    "b 30f\n"
+    "29:"  // Oddments: Store: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 30f\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
+    "30:"  // Oddments: Store: Bit 2: End
+
+    "31:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..9a16b99
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst
+{
+  typedef float operand_type;
+  typedef float return_type;
+
+  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  constexpr static unsigned int pool_rows(void) { return 3; }
+  constexpr static unsigned int pool_cols(void) { return 3; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
+
+  a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..bed4848
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const float *const *const inptrs,
+  float *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const float *const *const inptrs;
+    float *const *const outptrs;
+    float rescale_vals[4];
+
+    KernelArgs(
+      unsigned int channels,
+      const float *const *input_ptrs,
+      float *const * output_ptrs,
+      bool exclude_padding, unsigned int pad_left, unsigned int pad_top, unsigned int pad_right, unsigned int pad_bottom
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+      for (unsigned int i = 0; i < 2; i++)
+      {
+        const int start_i = 1*i - static_cast<int>(pad_top);
+        const int end_i = std::min<int>(start_i + 3, 4 - pad_top - pad_bottom);
+        const int valid_rows = end_i - std::max<int>(0, start_i);
+
+        for (unsigned int j = 0; j < 2; j++)
+        {
+          const int start_j = 1*j - static_cast<int>(pad_left);
+          const int end_j = std::min<int>(start_j + 3, 4 - pad_left - pad_right);
+          const int valid_cols = end_j - std::max<int>(0, start_j);
+
+          rescale_vals[i*2 + j] = static_cast<float>(1.0f / static_cast<float>(
+            exclude_padding ? valid_rows * valid_cols : 9
+          ));
+        }
+      }
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x4, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x5, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x6, #0x0\n"
+    "ldr q8, [%x[args], %[offsetof_rescale]]\n"
+    "ldp x7, x8, [x19, #0x0]\n"
+    "cmp x4, #0x4\n"
+    "ldp x17, x16, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x15, x14, [x19, #0x0]\n"
+    "ldp x13, x12, [x19, #0x10]\n"
+    "ldp x11, x10, [x19, #0x20]\n"
+    "ldp x9, x28, [x19, #0x30]\n"
+    "ldp x27, x26, [x19, #0x40]\n"
+    "ldp x25, x24, [x19, #0x50]\n"
+    "ldp x23, x22, [x19, #0x60]\n"
+    "ldp x21, x20, [x19, #0x70]\n"
+    "blt 3f\n"
+    "lsr x19, x4, #0x2\n"
+    "sub x4, x4, x19, LSL #2\n"
+    "ldr q7, [x10, x5]\n"
+    "ldr q6, [x9, x5]\n"
+    "ldr q5, [x26, x5]\n"
+    "ldr q4, [x25, x5]\n"
+    "ldr q3, [x14, x5]\n"
+    "ldr q2, [x13, x5]\n"
+    "ldr q1, [x11, x5]\n"
+    "ldr q0, [x27, x5]\n"
+    "ldr q31, [x28, x5]\n"
+    "ldr q30, [x24, x5]\n"
+    "ldr q29, [x22, x5]\n"
+    "ldr q28, [x21, x5]\n"
+    "ldr q27, [x15, x5]\n"
+    "ldr q26, [x12, x5]\n"
+    "ldr q25, [x23, x5]\n"
+    "ldr q24, [x20, x5]\n"
+    "add x5, x5, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "beq 2f\n"
+    "1:"  // Vector: Loop
+    "fadd v17.4s, v7.4s, v6.4s\n"
+    "ldr q7, [x10, x5]\n"
+    "fadd v16.4s, v5.4s, v4.4s\n"
+    "ldr q6, [x9, x5]\n"
+    "fadd v18.4s, v3.4s, v2.4s\n"
+    "ldr q5, [x26, x5]\n"
+    "fadd v23.4s, v1.4s, v0.4s\n"
+    "ldr q4, [x25, x5]\n"
+    "fadd v17.4s, v17.4s, v16.4s\n"
+    "ldr q3, [x14, x5]\n"
+    "fadd v22.4s, v31.4s, v30.4s\n"
+    "ldr q2, [x13, x5]\n"
+    "fadd v16.4s, v29.4s, v28.4s\n"
+    "ldr q1, [x11, x5]\n"
+    "fadd v21.4s, v18.4s, v17.4s\n"
+    "ldr q0, [x27, x5]\n"
+    "fadd v19.4s, v27.4s, v23.4s\n"
+    "ldr q31, [x28, x5]\n"
+    "fadd v20.4s, v16.4s, v17.4s\n"
+    "ldr q30, [x24, x5]\n"
+    "fadd v18.4s, v26.4s, v22.4s\n"
+    "ldr q29, [x22, x5]\n"
+    "fadd v17.4s, v25.4s, v23.4s\n"
+    "ldr q28, [x21, x5]\n"
+    "fadd v16.4s, v24.4s, v22.4s\n"
+    "ldr q27, [x15, x5]\n"
+    "fadd v19.4s, v19.4s, v21.4s\n"
+    "ldr q26, [x12, x5]\n"
+    "fadd v18.4s, v21.4s, v18.4s\n"
+    "ldr q25, [x23, x5]\n"
+    "fadd v17.4s, v17.4s, v20.4s\n"
+    "ldr q24, [x20, x5]\n"
+    "fadd v16.4s, v20.4s, v16.4s\n"
+    "add x5, x5, #0x10\n"
+    "fmul v19.4s, v19.4s, v8.s[0]\n"
+    "subs x19, x19, #0x1\n"
+    "fmul v18.4s, v18.4s, v8.s[1]\n"
+    "str q19, [x7, x6]\n"
+    "fmul v17.4s, v17.4s, v8.s[2]\n"
+    "fmul v16.4s, v16.4s, v8.s[3]\n"
+    "str q18, [x8, x6]\n"
+    "str q17, [x17, x6]\n"
+    "str q16, [x16, x6]\n"
+    "add x6, x6, #0x10\n"
+    "bgt 1b\n"
+    "2:"  // Vector: Tail
+    "fadd v17.4s, v7.4s, v6.4s\n"
+    "fadd v16.4s, v5.4s, v4.4s\n"
+    "fadd v18.4s, v3.4s, v2.4s\n"
+    "fadd v23.4s, v1.4s, v0.4s\n"
+    "fadd v17.4s, v17.4s, v16.4s\n"
+    "fadd v22.4s, v31.4s, v30.4s\n"
+    "fadd v16.4s, v29.4s, v28.4s\n"
+    "fadd v21.4s, v18.4s, v17.4s\n"
+    "fadd v19.4s, v27.4s, v23.4s\n"
+    "fadd v20.4s, v16.4s, v17.4s\n"
+    "fadd v18.4s, v26.4s, v22.4s\n"
+    "fadd v17.4s, v25.4s, v23.4s\n"
+    "fadd v16.4s, v24.4s, v22.4s\n"
+    "fadd v19.4s, v19.4s, v21.4s\n"
+    "fadd v18.4s, v21.4s, v18.4s\n"
+    "fadd v17.4s, v17.4s, v20.4s\n"
+    "fadd v16.4s, v20.4s, v16.4s\n"
+    "fmul v19.4s, v19.4s, v8.s[0]\n"
+    "str q19, [x7, x6]\n"
+    "fmul v18.4s, v18.4s, v8.s[1]\n"
+    "fmul v17.4s, v17.4s, v8.s[2]\n"
+    "str q18, [x8, x6]\n"
+    "fmul v16.4s, v16.4s, v8.s[3]\n"
+    "str q17, [x17, x6]\n"
+    "str q16, [x16, x6]\n"
+    "add x6, x6, #0x10\n"
+    "cbz x4, 4f\n"
+    "3:"  // Oddments
+    "ldr s7, [x10, x5]\n"
+    "ldr s6, [x9, x5]\n"
+    "fadd v17.4s, v7.4s, v6.4s\n"
+    "ldr s5, [x26, x5]\n"
+    "ldr s4, [x25, x5]\n"
+    "fadd v16.4s, v5.4s, v4.4s\n"
+    "ldr s3, [x14, x5]\n"
+    "ldr s2, [x13, x5]\n"
+    "fadd v17.4s, v17.4s, v16.4s\n"
+    "ldr s1, [x11, x5]\n"
+    "ldr s0, [x27, x5]\n"
+    "fadd v18.4s, v3.4s, v2.4s\n"
+    "ldr s31, [x28, x5]\n"
+    "ldr s30, [x24, x5]\n"
+    "fadd v23.4s, v1.4s, v0.4s\n"
+    "ldr s29, [x22, x5]\n"
+    "fadd v21.4s, v18.4s, v17.4s\n"
+    "ldr s28, [x21, x5]\n"
+    "ldr s27, [x15, x5]\n"
+    "fadd v22.4s, v31.4s, v30.4s\n"
+    "ldr s26, [x12, x5]\n"
+    "fadd v16.4s, v29.4s, v28.4s\n"
+    "ldr s25, [x23, x5]\n"
+    "fadd v19.4s, v27.4s, v23.4s\n"
+    "ldr s24, [x20, x5]\n"
+    "fadd v18.4s, v26.4s, v22.4s\n"
+    "add x5, x5, #0x4\n"
+    "subs x4, x4, #0x1\n"
+    "fadd v20.4s, v16.4s, v17.4s\n"
+    "fadd v19.4s, v19.4s, v21.4s\n"
+    "fadd v18.4s, v21.4s, v18.4s\n"
+    "fadd v17.4s, v25.4s, v23.4s\n"
+    "fadd v16.4s, v24.4s, v22.4s\n"
+    "fmul v19.4s, v19.4s, v8.s[0]\n"
+    "str s19, [x7, x6]\n"
+    "fadd v17.4s, v17.4s, v20.4s\n"
+    "fadd v16.4s, v20.4s, v16.4s\n"
+    "fmul v18.4s, v18.4s, v8.s[1]\n"
+    "str s18, [x8, x6]\n"
+    "fmul v17.4s, v17.4s, v8.s[2]\n"
+    "fmul v16.4s, v16.4s, v8.s[3]\n"
+    "str s17, [x17, x6]\n"
+    "str s16, [x16, x6]\n"
+    "add x6, x6, #0x4\n"
+    "bgt 3b\n"
+    "4:"  // End
+
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..e5a465e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+// Raw kernel entry point; the implementation lives in the matching
+// generic.cpp translation unit.
+void a64_fp32_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+// Kernel descriptor for generic-window FP32 NHWC average pooling.
+// Exposes the element types, the pooling flavour and the kernel function
+// pointer to the surrounding pooling framework (PoolingType and CPUInfo
+// are declared by the enclosing project headers).
+struct a64_fp32_nhwc_avg_generic_depthfirst
+{
+  typedef float operand_type;
+  typedef float return_type;
+
+  // Signature shared with the _impl function above.
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  // Default kernel binding.
+  kern_type kernel = a64_fp32_nhwc_avg_generic_depthfirst_impl;
+
+  // The CPUInfo argument is unused by this kernel; the constructor exists
+  // for interface uniformity with other kernel descriptors.
+  a64_fp32_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..f607518
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+
+// Depth-first generic average pooling for FP32 NHWC data (AArch64 NEON).
+//
+// Accumulates the channel-wise sum of the first `n_valid_cells` pointers in
+// `inptrs` and stores sum * (1 / window_cells) to `outptr`.  Note the
+// divisor is `window_cells`, not `n_valid_cells`; the caller is expected to
+// choose these two values to realise include/exclude-padding semantics
+// (TODO: confirm against the depth-first pooling driver).
+//
+// The assembly works in three channel passes: 16 channels at a time (four
+// q-register accumulators v3-v6, with the four byte offsets held in
+// x25-x28), then 4 channels at a time (v6 only), then 1-3 oddment lanes
+// selected by `tbz` bit tests on n_channels.  Within each pass the input
+// cells are consumed four at a time, with a scalar tail loop for the
+// remaining 1-3 cells.
+void a64_fp32_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const float *const *const inptrs,
+  float *outptr
+)
+{
+  // 1/window_cells; broadcast into v7 by the ld1r at the top of the asm.
+  const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
+
+  __asm__ __volatile__(
+    "ld1r { v7.4s }, [%x[rescale_ptr]]\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x10\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v5.16b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v4.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd v22.4s, v30.4s, v22.4s\n"
+    "add x19, x19, #0x20\n"
+    "fadd v18.4s, v29.4s, v28.4s\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v21.4s, v27.4s, v21.4s\n"
+    "fadd v17.4s, v26.4s, v17.4s\n"
+    "ldr q1, [x22, x28]\n"
+    "fadd v20.4s, v25.4s, v20.4s\n"
+    "ldr q0, [x21, x28]\n"
+    "fadd v16.4s, v24.4s, v16.4s\n"
+    "ldr q31, [x20, x28]\n"
+    "fadd v19.4s, v23.4s, v19.4s\n"
+    "ldr q30, [x23, x27]\n"
+    "fadd v18.4s, v22.4s, v18.4s\n"
+    "ldr q22, [x22, x27]\n"
+    "fadd v17.4s, v21.4s, v17.4s\n"
+    "ldr q29, [x21, x27]\n"
+    "fadd v16.4s, v20.4s, v16.4s\n"
+    "ldr q28, [x20, x27]\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "ldr q27, [x23, x26]\n"
+    "fadd v5.4s, v5.4s, v18.4s\n"
+    "ldr q21, [x22, x26]\n"
+    "fadd v4.4s, v4.4s, v17.4s\n"
+    "ldr q26, [x21, x26]\n"
+    "fadd v3.4s, v3.4s, v16.4s\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
+    "fadd v22.4s, v30.4s, v22.4s\n"
+    "fadd v18.4s, v29.4s, v28.4s\n"
+    "fadd v21.4s, v27.4s, v21.4s\n"
+    "fadd v17.4s, v26.4s, v17.4s\n"
+    "fadd v20.4s, v25.4s, v20.4s\n"
+    "fadd v16.4s, v24.4s, v16.4s\n"
+    "fadd v19.4s, v23.4s, v19.4s\n"
+    "fadd v18.4s, v22.4s, v18.4s\n"
+    "fadd v17.4s, v21.4s, v17.4s\n"
+    "fadd v16.4s, v20.4s, v16.4s\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "fadd v5.4s, v5.4s, v18.4s\n"
+    "fadd v4.4s, v4.4s, v17.4s\n"
+    "fadd v3.4s, v3.4s, v16.4s\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v6.4s, v6.4s, v2.4s\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "fadd v5.4s, v5.4s, v30.4s\n"
+    "ldr q25, [x23, x25]\n"
+    "fadd v4.4s, v4.4s, v27.4s\n"
+    "fadd v3.4s, v3.4s, v25.4s\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "fmul v6.4s, v6.4s, v7.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "fmul v5.4s, v5.4s, v7.4s\n"
+    "cmp %x[n_channels], #0x10\n"
+    "fmul v4.4s, v4.4s, v7.4s\n"
+    "str q6, [%x[outptr], x28]\n"
+    "fmul v3.4s, v3.4s, v7.4s\n"
+    "str q5, [%x[outptr], x27]\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 25f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x4\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd v19.4s, v23.4s, v19.4s\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
+    "fadd v19.4s, v23.4s, v19.4s\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fadd v6.4s, v6.4s, v2.4s\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "fmul v6.4s, v6.4s, v7.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x4\n"
+    "cmp %x[n_channels], #0x4\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 25f\n"
+    "14:"  // Oddments
+    "movi v6.16b, #0x0\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 18f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #0, 17f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "b 17f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 1: Unset
+    "tbz %x[n_channels], #0, 17f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 1: End
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
+    "fadd v19.4s, v23.4s, v19.4s\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "bgt 15b\n"
+    "18:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 22f\n"
+    "19:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #0, 21f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "b 21f\n"
+    "20:"  // Oddments: Single input loop: Load: Bit 1: Unset
+    "tbz %x[n_channels], #0, 21f\n"
+    "ldr s2, [x23], #0x4\n"
+    "21:"  // Oddments: Single input loop: Load: Bit 1: End
+    "fadd v6.4s, v6.4s, v2.4s\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 19b\n"
+    "22:"  // Oddments: Single input loop: End
+    "fmul v6.4s, v6.4s, v7.4s\n"
+    "tbz %x[n_channels], #1, 23f\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #0, 24f\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
+    "b 24f\n"
+    "23:"  // Oddments: Store: Bit 1: Unset
+    "tbz %x[n_channels], #0, 24f\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
+    "24:"  // Oddments: Store: Bit 1: End
+
+    "25:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..9a22adf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+// Raw kernel entry point; the implementation lives in the matching
+// generic.cpp translation unit.
+void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+// Kernel descriptor for FP32 NHWC max pooling with a fixed 2x2 window,
+// stride 1, producing a 2x2 output tile per call.  The constexpr
+// accessors below advertise this geometry to the pooling framework.
+struct a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  typedef float operand_type;
+  typedef float return_type;
+
+  // Signature shared with the _impl function above:
+  // (n_channels, inptrs, outptrs, exclude_padding, pad_left, pad_top,
+  //  pad_right, pad_bottom).
+  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  // Default kernel binding.
+  kern_type kernel = a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  // The CPUInfo argument is unused by this kernel; the constructor exists
+  // for interface uniformity with other kernel descriptors.
+  a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..9ad4a39
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+// 2x2 max pooling, stride 1, 2x2 output tile, FP32 NHWC (AArch64 NEON).
+//
+// Reads nine input-cell pointers (a 3x3 patch) from `inptrs` and four
+// output pointers from `outptrs`, then computes each of the four outputs
+// as the maximum of its 2x2 window.  Overlapping windows share the
+// pairwise fmax intermediates held in v20/v21.  Channels are processed
+// four at a time (q registers), with a scalar oddment loop for the
+// remaining 1-3 channels.
+//
+// The exclude_padding / pad_* arguments are accepted for interface
+// compatibility but dropped by KernelArgs — presumably padding is
+// resolved by the caller when it populates `inptrs` (TODO: confirm
+// against the depth-first pooling driver).
+void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const float *const *const inptrs,
+  float *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  // Argument bundle handed to the asm; note the padding parameters are
+  // intentionally unnamed and discarded by the constructor.
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const float *const *const inptrs;
+    float *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const float *const *input_ptrs,
+      float *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldp x12, x11, [x19, #0x0]\n"
+    "cmp x15, #0x4\n"
+    "ldp x10, x9, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x28, x27, [x19, #0x0]\n"
+    "ldp x26, x25, [x19, #0x10]\n"
+    "ldp x24, x23, [x19, #0x20]\n"
+    "ldp x22, x21, [x19, #0x30]\n"
+    "ldr x20, [x19, #0x40]\n"
+    "blt 3f\n"
+    "lsr x19, x15, #0x2\n"
+    "sub x15, x15, x19, LSL #2\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
+    "ldr q25, [x23, x14]\n"
+    "ldr q24, [x26, x14]\n"
+    "ldr q23, [x22, x14]\n"
+    "ldr q22, [x20, x14]\n"
+    "add x14, x14, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "beq 2f\n"
+    "1:"  // Vector: Loop
+    "fmax v21.4s, v30.4s, v29.4s\n"
+    "ldr q30, [x27, x14]\n"
+    "fmax v20.4s, v29.4s, v28.4s\n"
+    "ldr q29, [x24, x14]\n"
+    "fmax v19.4s, v27.4s, v26.4s\n"
+    "ldr q28, [x21, x14]\n"
+    "fmax v18.4s, v25.4s, v24.4s\n"
+    "ldr q26, [x28, x14]\n"
+    "fmax v17.4s, v23.4s, v27.4s\n"
+    "ldr q27, [x25, x14]\n"
+    "fmax v16.4s, v25.4s, v22.4s\n"
+    "ldr q25, [x23, x14]\n"
+    "fmax v19.4s, v21.4s, v19.4s\n"
+    "ldr q24, [x26, x14]\n"
+    "fmax v18.4s, v21.4s, v18.4s\n"
+    "ldr q23, [x22, x14]\n"
+    "fmax v17.4s, v20.4s, v17.4s\n"
+    "ldr q22, [x20, x14]\n"
+    "fmax v16.4s, v20.4s, v16.4s\n"
+    "add x14, x14, #0x10\n"
+    "str q19, [x12, x13]\n"
+    "str q18, [x11, x13]\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "bgt 1b\n"
+    "2:"  // Vector: Tail
+    "fmax v21.4s, v30.4s, v29.4s\n"
+    "fmax v20.4s, v29.4s, v28.4s\n"
+    "fmax v19.4s, v27.4s, v26.4s\n"
+    "fmax v18.4s, v25.4s, v24.4s\n"
+    "fmax v17.4s, v23.4s, v27.4s\n"
+    "fmax v16.4s, v25.4s, v22.4s\n"
+    "fmax v19.4s, v21.4s, v19.4s\n"
+    "str q19, [x12, x13]\n"
+    "fmax v18.4s, v21.4s, v18.4s\n"
+    "fmax v17.4s, v20.4s, v17.4s\n"
+    "str q18, [x11, x13]\n"
+    "fmax v16.4s, v20.4s, v16.4s\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "cbz x15, 4f\n"
+    "3:"  // Oddments
+    "ldr s30, [x27, x14]\n"
+    "ldr s29, [x24, x14]\n"
+    "fmax v21.4s, v30.4s, v29.4s\n"
+    "ldr s28, [x21, x14]\n"
+    "ldr s27, [x25, x14]\n"
+    "fmax v20.4s, v29.4s, v28.4s\n"
+    "ldr s26, [x28, x14]\n"
+    "ldr s25, [x23, x14]\n"
+    "fmax v19.4s, v27.4s, v26.4s\n"
+    "ldr s24, [x26, x14]\n"
+    "ldr s23, [x22, x14]\n"
+    "fmax v19.4s, v21.4s, v19.4s\n"
+    "ldr s22, [x20, x14]\n"
+    "add x14, x14, #0x4\n"
+    "fmax v18.4s, v25.4s, v24.4s\n"
+    "subs x15, x15, #0x1\n"
+    "fmax v17.4s, v23.4s, v27.4s\n"
+    "str s19, [x12, x13]\n"
+    "fmax v16.4s, v25.4s, v22.4s\n"
+    "fmax v18.4s, v21.4s, v18.4s\n"
+    "str s18, [x11, x13]\n"
+    "fmax v17.4s, v20.4s, v17.4s\n"
+    "fmax v16.4s, v20.4s, v16.4s\n"
+    "str s17, [x10, x13]\n"
+    "str s16, [x9, x13]\n"
+    "add x13, x13, #0x4\n"
+    "bgt 3b\n"
+    "4:"  // End
+
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..4b39237
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+// Raw kernel entry point; the implementation lives in the matching
+// generic.cpp translation unit.
+void a64_fp32_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+// Kernel descriptor for generic-window FP32 NHWC max pooling.  Exposes
+// the element types, the pooling flavour and the kernel function pointer
+// to the surrounding pooling framework.
+struct a64_fp32_nhwc_max_generic_depthfirst
+{
+  typedef float operand_type;
+  typedef float return_type;
+
+  // Signature shared with the _impl function above; the first (unnamed)
+  // parameter is the window cell count, unused by the max reduction.
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  // Default kernel binding.
+  kern_type kernel = a64_fp32_nhwc_max_generic_depthfirst_impl;
+
+  // The CPUInfo argument is unused by this kernel; the constructor exists
+  // for interface uniformity with other kernel descriptors.
+  a64_fp32_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..f9619b9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+
+// Depth-first generic max pooling for FP32 NHWC data (AArch64 NEON).
+//
+// Computes the channel-wise maximum of the first `n_valid_cells` pointers
+// in `inptrs` and writes it to `outptr`.  Accumulators are seeded with
+// 0xff800000 — the FP32 bit pattern for -infinity — so any number of
+// valid cells yields the true maximum.  The unnamed first parameter is
+// the window cell count, which the max reduction does not need.
+//
+// The assembly works in three channel passes: 16 channels at a time (four
+// q-register accumulators v3-v6, offsets in x25-x28), then 4 channels
+// (v6 only), then 1-3 oddment lanes selected by `tbz` bit tests on
+// n_channels.  Within each pass, input cells are consumed four at a time
+// with a scalar tail loop for the remainder.
+void a64_fp32_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const float *const *const inptrs,
+  float *outptr
+)
+{
+  __asm__ __volatile__(
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x10\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov w20, #0xff800000\n"
+    "dup v6.4s, w20\n"
+    "mov x19, %x[inptrs]\n"
+    "dup v5.4s, w20\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "dup v4.4s, w20\n"
+    "dup v3.4s, w20\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax v22.4s, v30.4s, v22.4s\n"
+    "add x19, x19, #0x20\n"
+    "fmax v18.4s, v29.4s, v28.4s\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v21.4s, v27.4s, v21.4s\n"
+    "fmax v17.4s, v26.4s, v17.4s\n"
+    "ldr q1, [x22, x28]\n"
+    "fmax v20.4s, v25.4s, v20.4s\n"
+    "ldr q0, [x21, x28]\n"
+    "fmax v16.4s, v24.4s, v16.4s\n"
+    "ldr q31, [x20, x28]\n"
+    "fmax v19.4s, v23.4s, v19.4s\n"
+    "ldr q30, [x23, x27]\n"
+    "fmax v18.4s, v22.4s, v18.4s\n"
+    "ldr q22, [x22, x27]\n"
+    "fmax v17.4s, v21.4s, v17.4s\n"
+    "ldr q29, [x21, x27]\n"
+    "fmax v16.4s, v20.4s, v16.4s\n"
+    "ldr q28, [x20, x27]\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "ldr q27, [x23, x26]\n"
+    "fmax v5.4s, v5.4s, v18.4s\n"
+    "ldr q21, [x22, x26]\n"
+    "fmax v4.4s, v4.4s, v17.4s\n"
+    "ldr q26, [x21, x26]\n"
+    "fmax v3.4s, v3.4s, v16.4s\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
+    "fmax v22.4s, v30.4s, v22.4s\n"
+    "fmax v18.4s, v29.4s, v28.4s\n"
+    "fmax v21.4s, v27.4s, v21.4s\n"
+    "fmax v17.4s, v26.4s, v17.4s\n"
+    "fmax v20.4s, v25.4s, v20.4s\n"
+    "fmax v16.4s, v24.4s, v16.4s\n"
+    "fmax v19.4s, v23.4s, v19.4s\n"
+    "fmax v18.4s, v22.4s, v18.4s\n"
+    "fmax v17.4s, v21.4s, v17.4s\n"
+    "fmax v16.4s, v20.4s, v16.4s\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "fmax v5.4s, v5.4s, v18.4s\n"
+    "fmax v4.4s, v4.4s, v17.4s\n"
+    "fmax v3.4s, v3.4s, v16.4s\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v6.4s, v6.4s, v2.4s\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "fmax v5.4s, v5.4s, v30.4s\n"
+    "ldr q25, [x23, x25]\n"
+    "fmax v4.4s, v4.4s, v27.4s\n"
+    "fmax v3.4s, v3.4s, v25.4s\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "str q4, [%x[outptr], x26]\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "cmp %x[n_channels], #0x10\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 25f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x4\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov w19, #0xff800000\n"
+    "dup v6.4s, w19\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax v19.4s, v23.4s, v19.4s\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
+    "fmax v19.4s, v23.4s, v19.4s\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "fmax v6.4s, v6.4s, v2.4s\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "sub %x[n_channels], %x[n_channels], #0x4\n"
+    "cmp %x[n_channels], #0x4\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 25f\n"
+    "14:"  // Oddments
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov w19, #0xff800000\n"
+    "dup v6.4s, w19\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 18f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #0, 17f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "b 17f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 1: Unset
+    "tbz %x[n_channels], #0, 17f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 1: End
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
+    "fmax v19.4s, v23.4s, v19.4s\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "bgt 15b\n"
+    "18:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 22f\n"
+    "19:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #0, 21f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "b 21f\n"
+    "20:"  // Oddments: Single input loop: Load: Bit 1: Unset
+    "tbz %x[n_channels], #0, 21f\n"
+    "ldr s2, [x23], #0x4\n"
+    "21:"  // Oddments: Single input loop: Load: Bit 1: End
+    "fmax v6.4s, v6.4s, v2.4s\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 19b\n"
+    "22:"  // Oddments: Single input loop: End
+    "tbz %x[n_channels], #1, 23f\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #0, 24f\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
+    "b 24f\n"
+    "23:"  // Oddments: Store: Bit 1: Unset
+    "tbz %x[n_channels], #0, 24f\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
+    "24:"  // Oddments: Store: Bit 1: End
+
+    "25:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..da97f77
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_s8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+struct a64_s8_nhwc_avg_generic_depthfirst
+{
+  typedef int8_t operand_type;
+  typedef int8_t return_type;
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = a64_s8_nhwc_avg_generic_depthfirst_impl;
+
+  a64_s8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..4b1f988
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,630 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void a64_s8_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  __asm__ __volatile__(
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n" // cntb _, ALL, #1
+    "mov x24, #0x20\n" // cntb _, ALL, #2
+    "mov x23, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v15.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v14.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v13.4s, #0x0\n"
+    "movi v12.4s, #0x0\n"
+    "movi v11.4s, #0x0\n"
+    "movi v10.4s, #0x0\n"
+    "movi v9.4s, #0x0\n"
+    "movi v8.4s, #0x0\n"
+    "movi v7.4s, #0x0\n"
+    "movi v6.4s, #0x0\n"
+    "movi v5.4s, #0x0\n"
+    "movi v4.4s, #0x0\n"
+    "movi v3.4s, #0x0\n"
+    "movi v2.4s, #0x0\n"
+    "movi v1.4s, #0x0\n"
+    "movi v0.4s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "ldr q24, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "saddl v21.8h, v29.8b, v28.8b\n"
+    "ldr q31, [x21, x26]\n"
+    "saddl2 v20.8h, v29.16b, v28.16b\n"
+    "saddl v19.8h, v27.8b, v26.8b\n"
+    "ldr q30, [x20, x26]\n"
+    "saddl2 v18.8h, v27.16b, v26.16b\n"
+    "ldr q29, [x21, x25]\n"
+    "saddl v17.8h, v25.8b, v24.8b\n"
+    "ldr q28, [x20, x25]\n"
+    "saddl2 v16.8h, v25.16b, v24.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q26, [x20, x24]\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q25, [x21, x23]\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "ldr q24, [x20, x23]\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "saddw v11.4s, v11.4s, v21.4h\n"
+    "saddw2 v10.4s, v10.4s, v21.8h\n"
+    "saddw v9.4s, v9.4s, v20.4h\n"
+    "saddw2 v8.4s, v8.4s, v20.8h\n"
+    "saddw v7.4s, v7.4s, v19.4h\n"
+    "saddw2 v6.4s, v6.4s, v19.8h\n"
+    "saddw v5.4s, v5.4s, v18.4h\n"
+    "saddw2 v4.4s, v4.4s, v18.8h\n"
+    "saddw v3.4s, v3.4s, v17.4h\n"
+    "saddw2 v2.4s, v2.4s, v17.8h\n"
+    "saddw v1.4s, v1.4s, v16.4h\n"
+    "saddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "saddl v21.8h, v29.8b, v28.8b\n"
+    "saddl2 v20.8h, v29.16b, v28.16b\n"
+    "saddl v19.8h, v27.8b, v26.8b\n"
+    "saddl2 v18.8h, v27.16b, v26.16b\n"
+    "saddl v17.8h, v25.8b, v24.8b\n"
+    "saddl2 v16.8h, v25.16b, v24.16b\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "saddw v11.4s, v11.4s, v21.4h\n"
+    "saddw2 v10.4s, v10.4s, v21.8h\n"
+    "saddw v9.4s, v9.4s, v20.4h\n"
+    "saddw2 v8.4s, v8.4s, v20.8h\n"
+    "saddw v7.4s, v7.4s, v19.4h\n"
+    "saddw2 v6.4s, v6.4s, v19.8h\n"
+    "saddw v5.4s, v5.4s, v18.4h\n"
+    "saddw2 v4.4s, v4.4s, v18.8h\n"
+    "saddw v3.4s, v3.4s, v17.4h\n"
+    "saddw2 v2.4s, v2.4s, v17.8h\n"
+    "saddw v1.4s, v1.4s, v16.4h\n"
+    "saddw2 v0.4s, v0.4s, v16.8h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "sxtl v16.8h, v31.8b\n"
+    "ldr q29, [x21, x25]\n"
+    "sxtl2 v22.8h, v31.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "saddw v15.4s, v15.4s, v16.4h\n"
+    "saddw2 v14.4s, v14.4s, v16.8h\n"
+    "sxtl v21.8h, v29.8b\n"
+    "sxtl2 v20.8h, v29.16b\n"
+    "sxtl v19.8h, v27.8b\n"
+    "sxtl2 v18.8h, v27.16b\n"
+    "sxtl v17.8h, v25.8b\n"
+    "sxtl2 v16.8h, v25.16b\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "saddw v11.4s, v11.4s, v21.4h\n"
+    "saddw2 v10.4s, v10.4s, v21.8h\n"
+    "saddw v9.4s, v9.4s, v20.4h\n"
+    "saddw2 v8.4s, v8.4s, v20.8h\n"
+    "saddw v7.4s, v7.4s, v19.4h\n"
+    "saddw2 v6.4s, v6.4s, v19.8h\n"
+    "saddw v5.4s, v5.4s, v18.4h\n"
+    "saddw2 v4.4s, v4.4s, v18.8h\n"
+    "saddw v3.4s, v3.4s, v17.4h\n"
+    "saddw2 v2.4s, v2.4s, v17.8h\n"
+    "saddw v1.4s, v1.4s, v16.4h\n"
+    "saddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "movi v19.4s, #0x7f\n"
+    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[shift_ptr]]\n"
+    "not v16.16b, v19.16b\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "cmp %x[n_channels], #0x40\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v11.4s, v11.4s, v18.4s\n"
+    "sqrdmulh v10.4s, v10.4s, v18.4s\n"
+    "sqrdmulh v9.4s, v9.4s, v18.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "srshl v11.4s, v11.4s, v17.4s\n"
+    "srshl v10.4s, v10.4s, v17.4s\n"
+    "srshl v9.4s, v9.4s, v17.4s\n"
+    "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+    "sqrdmulh v7.4s, v7.4s, v18.4s\n"
+    "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+    "sqrdmulh v5.4s, v5.4s, v18.4s\n"
+    "srshl v8.4s, v8.4s, v17.4s\n"
+    "srshl v7.4s, v7.4s, v17.4s\n"
+    "srshl v6.4s, v6.4s, v17.4s\n"
+    "srshl v5.4s, v5.4s, v17.4s\n"
+    "sqrdmulh v4.4s, v4.4s, v18.4s\n"
+    "sqrdmulh v3.4s, v3.4s, v18.4s\n"
+    "sqrdmulh v2.4s, v2.4s, v18.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v18.4s\n"
+    "srshl v4.4s, v4.4s, v17.4s\n"
+    "srshl v3.4s, v3.4s, v17.4s\n"
+    "srshl v2.4s, v2.4s, v17.4s\n"
+    "srshl v1.4s, v1.4s, v17.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v18.4s\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "srshl v0.4s, v0.4s, v17.4s\n"
+    "smin v15.4s, v15.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v19.4s\n"
+    "smin v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smax v11.4s, v11.4s, v16.4s\n"
+    "smax v10.4s, v10.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v19.4s\n"
+    "smin v11.4s, v11.4s, v19.4s\n"
+    "smin v10.4s, v10.4s, v19.4s\n"
+    "smax v9.4s, v9.4s, v16.4s\n"
+    "smax v8.4s, v8.4s, v16.4s\n"
+    "smax v7.4s, v7.4s, v16.4s\n"
+    "smin v9.4s, v9.4s, v19.4s\n"
+    "smin v8.4s, v8.4s, v19.4s\n"
+    "smin v7.4s, v7.4s, v19.4s\n"
+    "smax v6.4s, v6.4s, v16.4s\n"
+    "smax v5.4s, v5.4s, v16.4s\n"
+    "smax v4.4s, v4.4s, v16.4s\n"
+    "smin v6.4s, v6.4s, v19.4s\n"
+    "smin v5.4s, v5.4s, v19.4s\n"
+    "smin v4.4s, v4.4s, v19.4s\n"
+    "smax v3.4s, v3.4s, v16.4s\n"
+    "smax v2.4s, v2.4s, v16.4s\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smin v3.4s, v3.4s, v19.4s\n"
+    "smin v2.4s, v2.4s, v19.4s\n"
+    "smin v1.4s, v1.4s, v19.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "smin v0.4s, v0.4s, v19.4s\n"
+    "uzp1 v22.16b, v11.16b, v10.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
+    "uzp1 v17.16b, v5.16b, v4.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "add x26, x26, #0x40\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
+    "str q17, [%x[outptr], x24]\n"
+    "str q16, [%x[outptr], x23]\n"
+    "add x25, x25, #0x40\n"
+    "add x24, x24, #0x40\n"
+    "add x23, x23, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v15.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v14.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v13.4s, #0x0\n"
+    "movi v12.4s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q31, [x21, x26]\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q30, [x20, x26]\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "sxtl v16.8h, v31.8b\n"
+    "sxtl2 v22.8h, v31.16b\n"
+    "saddw v15.4s, v15.4s, v16.4h\n"
+    "saddw2 v14.4s, v14.4s, v16.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "movi v19.4s, #0x7f\n"
+    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[shift_ptr]]\n"
+    "not v16.16b, v19.16b\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "cmp %x[n_channels], #0x10\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smin v15.4s, v15.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v19.4s\n"
+    "smin v13.4s, v13.4s, v19.4s\n"
+    "smin v12.4s, v12.4s, v19.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "add x26, x26, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v15.4s, #0x0\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v13.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v12.4s, #0x0\n"
+    "cbz x22, 24f\n"
+    "15:"  // Oddments: 2 inputs loop
+    "movi v31.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "movi v30.16b, #0x0\n"
+    "add x21, x21, x26\n"
+    "add x20, x20, x26\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d31, [x21], #0x8\n"
+    "ldr d30, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "ld1 { v30.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "ld1 { v30.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "ld1 { v30.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "ld1 { v30.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "ld1 { v30.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "ld1 { v30.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "ld1 { v30.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s31, [x21], #0x4\n"
+    "ldr s30, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "ld1 { v30.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "ld1 { v30.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "ld1 { v30.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h31, [x21], #0x2\n"
+    "ldr h30, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "ld1 { v30.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b31, [x21], #0x1\n"
+    "ldr b30, [x20], #0x1\n"
+    "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "subs x22, x22, #0x1\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v31.16b, #0x0\n"
+    "ldr x21, [x19], #0x8\n"
+    "add x21, x21, x26\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d31, [x21], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s31, [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h31, [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b31, [x21], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "sxtl v16.8h, v31.8b\n"
+    "subs x20, x20, #0x1\n"
+    "sxtl2 v22.8h, v31.16b\n"
+    "saddw v15.4s, v15.4s, v16.4h\n"
+    "saddw2 v14.4s, v14.4s, v16.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "movi v19.4s, #0x7f\n"
+    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "not v16.16b, v19.16b\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[shift_ptr]]\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smin v15.4s, v15.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v19.4s\n"
+    "smin v13.4s, v13.4s, v19.4s\n"
+    "smin v12.4s, v12.4s, v19.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v16.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v16.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v16.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v16.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v16.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v16.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..7829ecc
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  typedef int8_t operand_type;
+  typedef int8_t return_type;
+
+  typedef void (*kern_type)(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..0bf6a66
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const int8_t *const *const inptrs;
+    int8_t *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const int8_t *const *input_ptrs,
+      int8_t *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldp x12, x11, [x19, #0x0]\n"
+    "cmp x15, #0x10\n"
+    "ldp x10, x9, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x28, x27, [x19, #0x0]\n"
+    "ldp x26, x25, [x19, #0x10]\n"
+    "ldp x24, x23, [x19, #0x20]\n"
+    "ldp x22, x21, [x19, #0x30]\n"
+    "ldr x20, [x19, #0x40]\n"
+    "blt 3f\n"
+    "lsr x19, x15, #0x4\n"
+    "sub x15, x15, x19, LSL #4\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
+    "ldr q25, [x23, x14]\n"
+    "ldr q24, [x26, x14]\n"
+    "ldr q23, [x22, x14]\n"
+    "ldr q22, [x20, x14]\n"
+    "add x14, x14, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "beq 2f\n"
+    "1:"  // Vector: Loop
+    "smax v21.16b, v30.16b, v29.16b\n"
+    "ldr q30, [x27, x14]\n"
+    "smax v20.16b, v29.16b, v28.16b\n"
+    "ldr q29, [x24, x14]\n"
+    "smax v19.16b, v27.16b, v26.16b\n"
+    "ldr q28, [x21, x14]\n"
+    "smax v18.16b, v25.16b, v24.16b\n"
+    "ldr q26, [x28, x14]\n"
+    "smax v17.16b, v23.16b, v27.16b\n"
+    "ldr q27, [x25, x14]\n"
+    "smax v16.16b, v25.16b, v22.16b\n"
+    "ldr q25, [x23, x14]\n"
+    "smax v19.16b, v21.16b, v19.16b\n"
+    "ldr q24, [x26, x14]\n"
+    "smax v18.16b, v21.16b, v18.16b\n"
+    "ldr q23, [x22, x14]\n"
+    "smax v17.16b, v20.16b, v17.16b\n"
+    "ldr q22, [x20, x14]\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "add x14, x14, #0x10\n"
+    "str q19, [x12, x13]\n"
+    "str q18, [x11, x13]\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "bgt 1b\n"
+    "2:"  // Vector: Tail
+    "smax v21.16b, v30.16b, v29.16b\n"
+    "smax v20.16b, v29.16b, v28.16b\n"
+    "smax v19.16b, v27.16b, v26.16b\n"
+    "smax v18.16b, v25.16b, v24.16b\n"
+    "smax v17.16b, v23.16b, v27.16b\n"
+    "smax v16.16b, v25.16b, v22.16b\n"
+    "smax v19.16b, v21.16b, v19.16b\n"
+    "str q19, [x12, x13]\n"
+    "smax v18.16b, v21.16b, v18.16b\n"
+    "smax v17.16b, v20.16b, v17.16b\n"
+    "str q18, [x11, x13]\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "cbz x15, 4f\n"
+    "3:"  // Oddments
+    "ldr b30, [x27, x14]\n"
+    "ldr b29, [x24, x14]\n"
+    "smax v21.16b, v30.16b, v29.16b\n"
+    "ldr b28, [x21, x14]\n"
+    "ldr b27, [x25, x14]\n"
+    "smax v20.16b, v29.16b, v28.16b\n"
+    "ldr b26, [x28, x14]\n"
+    "ldr b25, [x23, x14]\n"
+    "smax v19.16b, v27.16b, v26.16b\n"
+    "ldr b24, [x26, x14]\n"
+    "ldr b23, [x22, x14]\n"
+    "smax v19.16b, v21.16b, v19.16b\n"
+    "ldr b22, [x20, x14]\n"
+    "add x14, x14, #0x1\n"
+    "smax v18.16b, v25.16b, v24.16b\n"
+    "subs x15, x15, #0x1\n"
+    "smax v17.16b, v23.16b, v27.16b\n"
+    "str b19, [x12, x13]\n"
+    "smax v16.16b, v25.16b, v22.16b\n"
+    "smax v18.16b, v21.16b, v18.16b\n"
+    "str b18, [x11, x13]\n"
+    "smax v17.16b, v20.16b, v17.16b\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "str b17, [x10, x13]\n"
+    "str b16, [x9, x13]\n"
+    "add x13, x13, #0x1\n"
+    "bgt 3b\n"
+    "4:"  // End
+
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..bc54992
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_s8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);  // AArch64 asm: generic-window int8 NHWC max pooling; first arg (window_cells) is unused for MAX
+
+struct a64_s8_nhwc_max_generic_depthfirst  // Kernel descriptor consumed by the depthfirst pooling driver
+{
+  typedef int8_t operand_type;  // input element type
+  typedef int8_t return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);  // signature shared by all generic max-pooling kernels
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }  // selects this kernel for MAX pooling
+
+  kern_type kernel = a64_s8_nhwc_max_generic_depthfirst_impl;  // default (and only) implementation
+
+  a64_s8_nhwc_max_generic_depthfirst(const CPUInfo *) {}  // CPUInfo unused: no per-CPU specialisation
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..0b7e6df
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void a64_s8_nhwc_max_generic_depthfirst_impl(  // out[c] = max over all valid window cells of in[cell][c], int8 NHWC
+  const uint64_t,  // window_cells: unused, max pooling needs no rescale by window size
+  const uint64_t n_valid_cells,  // number of valid input-row pointers in inptrs
+  uint64_t n_channels,  // number of channels (bytes per row) to reduce
+  const int8_t *const *const inptrs,  // array of n_valid_cells pointers to NHWC input rows
+  int8_t *outptr  // output row, n_channels bytes
+)
+{
+  __asm__ __volatile__(
+    "mov x28, #0x0\n"  // x28..x25: byte offsets of the four 16-byte vectors within the channel dim
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"  // main path processes 64 channels (4 q-registers) per pass
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v6.16b, #0x80\n"  // v6..v3: accumulators seeded with INT8_MIN (-128) so any input wins
+    "mov x19, %x[inptrs]\n"
+    "movi v5.16b, #0x80\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"  // consume input cells four at a time
+    "movi v4.16b, #0x80\n"
+    "movi v3.16b, #0x80\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "smax v23.16b, v2.16b, v1.16b\n"  // pairwise max of the four loaded cells, then fold into accumulators
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
+    "add x19, x19, #0x20\n"
+    "smax v18.16b, v29.16b, v28.16b\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v21.16b, v27.16b, v21.16b\n"
+    "smax v17.16b, v26.16b, v17.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "smax v20.16b, v25.16b, v20.16b\n"
+    "ldr q0, [x21, x28]\n"
+    "smax v16.16b, v24.16b, v16.16b\n"
+    "ldr q31, [x20, x28]\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "smax v18.16b, v22.16b, v18.16b\n"
+    "ldr q22, [x22, x27]\n"
+    "smax v17.16b, v21.16b, v17.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "ldr q28, [x20, x27]\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "ldr q27, [x23, x26]\n"
+    "smax v5.16b, v5.16b, v18.16b\n"
+    "ldr q21, [x22, x26]\n"
+    "smax v4.16b, v4.16b, v17.16b\n"
+    "ldr q26, [x21, x26]\n"
+    "smax v3.16b, v3.16b, v16.16b\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "smax v23.16b, v2.16b, v1.16b\n"  // same reduction as the loop body, without reloading
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
+    "smax v18.16b, v29.16b, v28.16b\n"
+    "smax v21.16b, v27.16b, v21.16b\n"
+    "smax v17.16b, v26.16b, v17.16b\n"
+    "smax v20.16b, v25.16b, v20.16b\n"
+    "smax v16.16b, v24.16b, v16.16b\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "smax v18.16b, v22.16b, v18.16b\n"
+    "smax v17.16b, v21.16b, v17.16b\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "smax v5.16b, v5.16b, v18.16b\n"
+    "smax v4.16b, v4.16b, v17.16b\n"
+    "smax v3.16b, v3.16b, v16.16b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"  // handle the 1-3 leftover input cells
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v6.16b, v6.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "smax v5.16b, v5.16b, v30.16b\n"
+    "ldr q25, [x23, x25]\n"
+    "smax v4.16b, v4.16b, v27.16b\n"
+    "smax v3.16b, v3.16b, v25.16b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"  // write 64 output channels and advance the four offsets
+    "str q5, [%x[outptr], x27]\n"
+    "str q4, [%x[outptr], x26]\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "cmp %x[n_channels], #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"  // done if no channels remain
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"  // middle path: one 16-channel vector per pass
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v6.16b, #0x80\n"  // re-seed accumulator with INT8_MIN
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v6.16b, v6.16b, v2.16b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "cmp %x[n_channels], #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v6.16b, #0x80\n"  // tail path: 1-15 channels left; loads are zero-padded but padded lanes are never stored
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 24f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"  // clear before the partial (bit-by-bit) loads below
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #3, 19f\n"  // branch on bits 3..0 of n_channels: load 8, then 4, 2, 1 bytes
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
+    "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "subs x24, x24, #0x1\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s2, [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h2, [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b2, [x23], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "smax v6.16b, v6.16b, v2.16b\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "tbz %x[n_channels], #3, 38f\n"  // store the 1-15 remaining bytes using the same bit decomposition
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)  // read-write: both are advanced in place by the asm
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..e5354ca
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_s8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);  // AArch64 asm: generic-window requantized int8 NHWC average pooling; qp carries the requantization parameters
+
+struct a64_s8q_nhwc_avg_generic_depthfirst  // Kernel descriptor consumed by the depthfirst pooling driver
+{
+  typedef int8_t operand_type;  // input element type
+  typedef int8_t return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);  // window_cells is the divisor for the average
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }  // selects this kernel for AVERAGE pooling
+
+  kern_type kernel = a64_s8q_nhwc_avg_generic_depthfirst_impl;  // default (and only) implementation
+
+  a64_s8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}  // CPUInfo unused: no per-CPU specialisation
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..7246b69
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {  // File-local: precomputed fixed-point reciprocals for small pooling windows.
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;  // multiplier = round(2^31 * value * 2^-shift); shift <= 0 denotes a right shift applied after sqrdmulh
+  };
+
+  constexpr RescaleParams rescale_params[8] = {  // indexed by window_cells - 2 (windows of 2..9 cells)
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void a64_s8q_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  // Combine together the rescale value for the requantization and the scaling
+  // factor for the average pool.
+  const int32_t shift = qp.per_layer_left_shift - qp.per_layer_right_shift + shift_value;
+  const int32_t left_shift = shift > 0 ? shift : 0;
+  const int32_t right_shift = shift <= 0 ? shift : 0;
+
+  int32_t combined_rescale_value = 0;
+  __asm__ __volatile__ (
+      "mov v16.s[0], %w[per_layer_mul]\n"
+      "mov v17.s[0], %w[rescale_value]\n"
+      "sqrdmulh s18, s16, s17\n"
+      "mov %w[combined_rescale_value], v18.s[0]\n"
+    : [combined_rescale_value] "=r" (combined_rescale_value)
+    : [per_layer_mul] "r" (qp.per_layer_mul), [rescale_value] "r" (rescale_value)
+    : "v16", "v17", "v18"
+  );
+
+  __asm__ __volatile__(
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n" // cntb _, ALL, #1
+    "mov x24, #0x20\n" // cntb _, ALL, #2
+    "mov x23, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v15.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v14.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v13.4s, #0x0\n"
+    "movi v12.4s, #0x0\n"
+    "movi v11.4s, #0x0\n"
+    "movi v10.4s, #0x0\n"
+    "movi v9.4s, #0x0\n"
+    "movi v8.4s, #0x0\n"
+    "movi v7.4s, #0x0\n"
+    "movi v6.4s, #0x0\n"
+    "movi v5.4s, #0x0\n"
+    "movi v4.4s, #0x0\n"
+    "movi v3.4s, #0x0\n"
+    "movi v2.4s, #0x0\n"
+    "movi v1.4s, #0x0\n"
+    "movi v0.4s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "ldr q24, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "saddl v21.8h, v29.8b, v28.8b\n"
+    "ldr q31, [x21, x26]\n"
+    "saddl2 v20.8h, v29.16b, v28.16b\n"
+    "saddl v19.8h, v27.8b, v26.8b\n"
+    "ldr q30, [x20, x26]\n"
+    "saddl2 v18.8h, v27.16b, v26.16b\n"
+    "ldr q29, [x21, x25]\n"
+    "saddl v17.8h, v25.8b, v24.8b\n"
+    "ldr q28, [x20, x25]\n"
+    "saddl2 v16.8h, v25.16b, v24.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q26, [x20, x24]\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q25, [x21, x23]\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "ldr q24, [x20, x23]\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "saddw v11.4s, v11.4s, v21.4h\n"
+    "saddw2 v10.4s, v10.4s, v21.8h\n"
+    "saddw v9.4s, v9.4s, v20.4h\n"
+    "saddw2 v8.4s, v8.4s, v20.8h\n"
+    "saddw v7.4s, v7.4s, v19.4h\n"
+    "saddw2 v6.4s, v6.4s, v19.8h\n"
+    "saddw v5.4s, v5.4s, v18.4h\n"
+    "saddw2 v4.4s, v4.4s, v18.8h\n"
+    "saddw v3.4s, v3.4s, v17.4h\n"
+    "saddw2 v2.4s, v2.4s, v17.8h\n"
+    "saddw v1.4s, v1.4s, v16.4h\n"
+    "saddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "saddl v21.8h, v29.8b, v28.8b\n"
+    "saddl2 v20.8h, v29.16b, v28.16b\n"
+    "saddl v19.8h, v27.8b, v26.8b\n"
+    "saddl2 v18.8h, v27.16b, v26.16b\n"
+    "saddl v17.8h, v25.8b, v24.8b\n"
+    "saddl2 v16.8h, v25.16b, v24.16b\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "saddw v11.4s, v11.4s, v21.4h\n"
+    "saddw2 v10.4s, v10.4s, v21.8h\n"
+    "saddw v9.4s, v9.4s, v20.4h\n"
+    "saddw2 v8.4s, v8.4s, v20.8h\n"
+    "saddw v7.4s, v7.4s, v19.4h\n"
+    "saddw2 v6.4s, v6.4s, v19.8h\n"
+    "saddw v5.4s, v5.4s, v18.4h\n"
+    "saddw2 v4.4s, v4.4s, v18.8h\n"
+    "saddw v3.4s, v3.4s, v17.4h\n"
+    "saddw2 v2.4s, v2.4s, v17.8h\n"
+    "saddw v1.4s, v1.4s, v16.4h\n"
+    "saddw2 v0.4s, v0.4s, v16.8h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "sxtl v16.8h, v31.8b\n"
+    "ldr q29, [x21, x25]\n"
+    "sxtl2 v22.8h, v31.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "saddw v15.4s, v15.4s, v16.4h\n"
+    "saddw2 v14.4s, v14.4s, v16.8h\n"
+    "sxtl v21.8h, v29.8b\n"
+    "sxtl2 v20.8h, v29.16b\n"
+    "sxtl v19.8h, v27.8b\n"
+    "sxtl2 v18.8h, v27.16b\n"
+    "sxtl v17.8h, v25.8b\n"
+    "sxtl2 v16.8h, v25.16b\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "saddw v11.4s, v11.4s, v21.4h\n"
+    "saddw2 v10.4s, v10.4s, v21.8h\n"
+    "saddw v9.4s, v9.4s, v20.4h\n"
+    "saddw2 v8.4s, v8.4s, v20.8h\n"
+    "saddw v7.4s, v7.4s, v19.4h\n"
+    "saddw2 v6.4s, v6.4s, v19.8h\n"
+    "saddw v5.4s, v5.4s, v18.4h\n"
+    "saddw2 v4.4s, v4.4s, v18.8h\n"
+    "saddw v3.4s, v3.4s, v17.4h\n"
+    "saddw2 v2.4s, v2.4s, v17.8h\n"
+    "saddw v1.4s, v1.4s, v16.4h\n"
+    "saddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "movi v20.4s, #0x7f\n"
+    "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[right_shift]]\n"
+    "not v16.16b, v20.16b\n"
+    "srshl v14.4s, v14.4s, v18.4s\n"
+    "cmp %x[n_channels], #0x40\n"
+    "srshl v13.4s, v13.4s, v18.4s\n"
+    "srshl v12.4s, v12.4s, v18.4s\n"
+    "srshl v11.4s, v11.4s, v18.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v19.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v19.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v19.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "sqrdmulh v11.4s, v11.4s, v19.4s\n"
+    "srshl v10.4s, v10.4s, v18.4s\n"
+    "srshl v9.4s, v9.4s, v18.4s\n"
+    "srshl v8.4s, v8.4s, v18.4s\n"
+    "srshl v11.4s, v11.4s, v17.4s\n"
+    "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+    "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+    "sqrdmulh v8.4s, v8.4s, v19.4s\n"
+    "srshl v7.4s, v7.4s, v18.4s\n"
+    "srshl v10.4s, v10.4s, v17.4s\n"
+    "srshl v9.4s, v9.4s, v17.4s\n"
+    "srshl v8.4s, v8.4s, v17.4s\n"
+    "sqrdmulh v7.4s, v7.4s, v19.4s\n"
+    "srshl v6.4s, v6.4s, v18.4s\n"
+    "srshl v5.4s, v5.4s, v18.4s\n"
+    "srshl v4.4s, v4.4s, v18.4s\n"
+    "srshl v7.4s, v7.4s, v17.4s\n"
+    "sqrdmulh v6.4s, v6.4s, v19.4s\n"
+    "sqrdmulh v5.4s, v5.4s, v19.4s\n"
+    "sqrdmulh v4.4s, v4.4s, v19.4s\n"
+    "srshl v3.4s, v3.4s, v18.4s\n"
+    "srshl v6.4s, v6.4s, v17.4s\n"
+    "srshl v5.4s, v5.4s, v17.4s\n"
+    "srshl v4.4s, v4.4s, v17.4s\n"
+    "sqrdmulh v3.4s, v3.4s, v19.4s\n"
+    "srshl v2.4s, v2.4s, v18.4s\n"
+    "srshl v1.4s, v1.4s, v18.4s\n"
+    "srshl v0.4s, v0.4s, v18.4s\n"
+    "srshl v3.4s, v3.4s, v17.4s\n"
+    "sqrdmulh v2.4s, v2.4s, v19.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v19.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v19.4s\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "srshl v2.4s, v2.4s, v17.4s\n"
+    "srshl v1.4s, v1.4s, v17.4s\n"
+    "srshl v0.4s, v0.4s, v17.4s\n"
+    "smin v15.4s, v15.4s, v20.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v20.4s\n"
+    "smin v13.4s, v13.4s, v20.4s\n"
+    "smin v12.4s, v12.4s, v20.4s\n"
+    "smax v11.4s, v11.4s, v16.4s\n"
+    "smax v10.4s, v10.4s, v16.4s\n"
+    "smax v9.4s, v9.4s, v16.4s\n"
+    "smin v11.4s, v11.4s, v20.4s\n"
+    "smin v10.4s, v10.4s, v20.4s\n"
+    "smin v9.4s, v9.4s, v20.4s\n"
+    "smax v8.4s, v8.4s, v16.4s\n"
+    "smax v7.4s, v7.4s, v16.4s\n"
+    "smax v6.4s, v6.4s, v16.4s\n"
+    "smin v8.4s, v8.4s, v20.4s\n"
+    "smin v7.4s, v7.4s, v20.4s\n"
+    "smin v6.4s, v6.4s, v20.4s\n"
+    "smax v5.4s, v5.4s, v16.4s\n"
+    "smax v4.4s, v4.4s, v16.4s\n"
+    "smax v3.4s, v3.4s, v16.4s\n"
+    "smin v5.4s, v5.4s, v20.4s\n"
+    "smin v4.4s, v4.4s, v20.4s\n"
+    "smin v3.4s, v3.4s, v20.4s\n"
+    "smax v2.4s, v2.4s, v16.4s\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smin v2.4s, v2.4s, v20.4s\n"
+    "smin v1.4s, v1.4s, v20.4s\n"
+    "smin v0.4s, v0.4s, v20.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v22.16b, v11.16b, v10.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
+    "uzp1 v17.16b, v5.16b, v4.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "add x26, x26, #0x40\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
+    "str q17, [%x[outptr], x24]\n"
+    "str q16, [%x[outptr], x23]\n"
+    "add x25, x25, #0x40\n"
+    "add x24, x24, #0x40\n"
+    "add x23, x23, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v15.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v14.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v13.4s, #0x0\n"
+    "movi v12.4s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q31, [x21, x26]\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q30, [x20, x26]\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "sxtl v16.8h, v31.8b\n"
+    "sxtl2 v22.8h, v31.16b\n"
+    "saddw v15.4s, v15.4s, v16.4h\n"
+    "saddw2 v14.4s, v14.4s, v16.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "movi v20.4s, #0x7f\n"
+    "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[right_shift]]\n"
+    "not v16.16b, v20.16b\n"
+    "srshl v14.4s, v14.4s, v18.4s\n"
+    "cmp %x[n_channels], #0x10\n"
+    "srshl v13.4s, v13.4s, v18.4s\n"
+    "srshl v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v19.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v19.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v19.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smin v15.4s, v15.4s, v20.4s\n"
+    "smin v14.4s, v14.4s, v20.4s\n"
+    "smin v13.4s, v13.4s, v20.4s\n"
+    "smin v12.4s, v12.4s, v20.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "add x26, x26, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v15.4s, #0x0\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v13.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v12.4s, #0x0\n"
+    "cbz x22, 24f\n"
+    "15:"  // Oddments: 2 inputs loop
+    "movi v31.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "movi v30.16b, #0x0\n"
+    "add x21, x21, x26\n"
+    "add x20, x20, x26\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d31, [x21], #0x8\n"
+    "ldr d30, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "ld1 { v30.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "ld1 { v30.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "ld1 { v30.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "ld1 { v30.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "ld1 { v30.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "ld1 { v30.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "ld1 { v30.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s31, [x21], #0x4\n"
+    "ldr s30, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "ld1 { v30.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "ld1 { v30.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "ld1 { v30.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h31, [x21], #0x2\n"
+    "ldr h30, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "ld1 { v30.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b31, [x21], #0x1\n"
+    "ldr b30, [x20], #0x1\n"
+    "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
+    "saddl v23.8h, v31.8b, v30.8b\n"
+    "subs x22, x22, #0x1\n"
+    "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
+    "saddw2 v14.4s, v14.4s, v23.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v31.16b, #0x0\n"
+    "ldr x21, [x19], #0x8\n"
+    "add x21, x21, x26\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d31, [x21], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s31, [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h31, [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b31, [x21], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "sxtl v16.8h, v31.8b\n"
+    "subs x20, x20, #0x1\n"
+    "sxtl2 v22.8h, v31.16b\n"
+    "saddw v15.4s, v15.4s, v16.4h\n"
+    "saddw2 v14.4s, v14.4s, v16.8h\n"
+    "saddw v13.4s, v13.4s, v22.4h\n"
+    "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "movi v20.4s, #0x7f\n"
+    "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
+    "not v16.16b, v20.16b\n"
+    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[right_shift]]\n"
+    "srshl v14.4s, v14.4s, v18.4s\n"
+    "srshl v13.4s, v13.4s, v18.4s\n"
+    "srshl v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v19.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v19.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v19.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smin v15.4s, v15.4s, v20.4s\n"
+    "smin v14.4s, v14.4s, v20.4s\n"
+    "smin v13.4s, v13.4s, v20.4s\n"
+    "smin v12.4s, v12.4s, v20.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v16.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v16.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v16.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v16.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v16.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v16.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [right_shift] "r" (&right_shift)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..6abbcd0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_s8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);  // Kernel entry point; AArch64 asm body lives in generic.cpp alongside this header.
+
+struct a64_s8q_nhwc_max_generic_depthfirst  // Descriptor for the generic depth-first max-pooling kernel on signed 8-bit quantized (s8q) NHWC data.
+{
+  typedef int8_t operand_type;  // Input element type.
+  typedef int8_t return_type;   // Output element type.
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);  // Signature of the kernel function pointer below.
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }  // This kernel implements max pooling.
+
+  kern_type kernel = a64_s8q_nhwc_max_generic_depthfirst_impl;
+
+  a64_s8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}  // CPUInfo is unused here; parameter kept so all kernel descriptors construct uniformly.
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..33cf634
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,640 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void a64_s8q_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  __asm__ __volatile__(
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v3.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v7.16b, #0x80\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x80\n"
+    "movi v5.16b, #0x80\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
+    "add x19, x19, #0x20\n"
+    "smax v18.16b, v29.16b, v28.16b\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v21.16b, v27.16b, v21.16b\n"
+    "smax v17.16b, v26.16b, v17.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "smax v20.16b, v25.16b, v20.16b\n"
+    "ldr q0, [x21, x28]\n"
+    "smax v16.16b, v24.16b, v16.16b\n"
+    "ldr q31, [x20, x28]\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "smax v18.16b, v22.16b, v18.16b\n"
+    "ldr q22, [x22, x27]\n"
+    "smax v17.16b, v21.16b, v17.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "ldr q28, [x20, x27]\n"
+    "smax v3.16b, v3.16b, v19.16b\n"
+    "ldr q27, [x23, x26]\n"
+    "smax v7.16b, v7.16b, v18.16b\n"
+    "ldr q21, [x22, x26]\n"
+    "smax v6.16b, v6.16b, v17.16b\n"
+    "ldr q26, [x21, x26]\n"
+    "smax v5.16b, v5.16b, v16.16b\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
+    "smax v18.16b, v29.16b, v28.16b\n"
+    "smax v21.16b, v27.16b, v21.16b\n"
+    "smax v17.16b, v26.16b, v17.16b\n"
+    "smax v20.16b, v25.16b, v20.16b\n"
+    "smax v16.16b, v24.16b, v16.16b\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "smax v18.16b, v22.16b, v18.16b\n"
+    "smax v17.16b, v21.16b, v17.16b\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "smax v3.16b, v3.16b, v19.16b\n"
+    "smax v7.16b, v7.16b, v18.16b\n"
+    "smax v6.16b, v6.16b, v17.16b\n"
+    "smax v5.16b, v5.16b, v16.16b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v3.16b, v3.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "smax v7.16b, v7.16b, v30.16b\n"
+    "ldr q25, [x23, x25]\n"
+    "smax v6.16b, v6.16b, v27.16b\n"
+    "smax v5.16b, v5.16b, v25.16b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "sxtl v23.8h, v3.8b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "sxtl2 v22.8h, v3.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "sxtl v21.8h, v7.8b\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "sxtl2 v18.8h, v7.16b\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "sxtl v20.8h, v6.8b\n"
+    "cmp %x[n_channels], #0x40\n"
+    "sxtl2 v19.8h, v6.16b\n"
+    "sxtl v17.8h, v5.8b\n"
+    "sxtl2 v16.8h, v5.16b\n"
+    "sxtl v1.4s, v23.4h\n"
+    "sxtl2 v23.4s, v23.8h\n"
+    "sxtl v0.4s, v22.4h\n"
+    "sxtl2 v31.4s, v22.8h\n"
+    "sxtl v30.4s, v21.4h\n"
+    "sxtl2 v22.4s, v21.8h\n"
+    "sxtl v29.4s, v18.4h\n"
+    "sxtl2 v18.4s, v18.8h\n"
+    "sxtl v28.4s, v20.4h\n"
+    "sxtl2 v21.4s, v20.8h\n"
+    "sxtl v27.4s, v19.4h\n"
+    "sxtl2 v26.4s, v19.8h\n"
+    "sxtl v25.4s, v17.4h\n"
+    "sxtl2 v20.4s, v17.8h\n"
+    "sxtl v24.4s, v16.4h\n"
+    "sxtl2 v19.4s, v16.8h\n"
+    "srshl v1.4s, v1.4s, v3.4s\n"
+    "srshl v23.4s, v23.4s, v3.4s\n"
+    "srshl v0.4s, v0.4s, v3.4s\n"
+    "srshl v31.4s, v31.4s, v3.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v4.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v4.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+    "srshl v1.4s, v1.4s, v2.4s\n"
+    "srshl v23.4s, v23.4s, v2.4s\n"
+    "srshl v0.4s, v0.4s, v2.4s\n"
+    "srshl v31.4s, v31.4s, v2.4s\n"
+    "srshl v30.4s, v30.4s, v3.4s\n"
+    "srshl v22.4s, v22.4s, v3.4s\n"
+    "srshl v29.4s, v29.4s, v3.4s\n"
+    "srshl v18.4s, v18.4s, v3.4s\n"
+    "sqrdmulh v30.4s, v30.4s, v4.4s\n"
+    "sqrdmulh v22.4s, v22.4s, v4.4s\n"
+    "sqrdmulh v29.4s, v29.4s, v4.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+    "srshl v30.4s, v30.4s, v2.4s\n"
+    "srshl v22.4s, v22.4s, v2.4s\n"
+    "srshl v29.4s, v29.4s, v2.4s\n"
+    "srshl v18.4s, v18.4s, v2.4s\n"
+    "srshl v28.4s, v28.4s, v3.4s\n"
+    "srshl v21.4s, v21.4s, v3.4s\n"
+    "srshl v27.4s, v27.4s, v3.4s\n"
+    "srshl v26.4s, v26.4s, v3.4s\n"
+    "sqrdmulh v28.4s, v28.4s, v4.4s\n"
+    "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+    "sqrdmulh v27.4s, v27.4s, v4.4s\n"
+    "sqrdmulh v26.4s, v26.4s, v4.4s\n"
+    "srshl v28.4s, v28.4s, v2.4s\n"
+    "srshl v21.4s, v21.4s, v2.4s\n"
+    "srshl v27.4s, v27.4s, v2.4s\n"
+    "srshl v26.4s, v26.4s, v2.4s\n"
+    "srshl v25.4s, v25.4s, v3.4s\n"
+    "srshl v20.4s, v20.4s, v3.4s\n"
+    "srshl v24.4s, v24.4s, v3.4s\n"
+    "srshl v19.4s, v19.4s, v3.4s\n"
+    "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+    "sqrdmulh v20.4s, v20.4s, v4.4s\n"
+    "sqrdmulh v24.4s, v24.4s, v4.4s\n"
+    "sqrdmulh v19.4s, v19.4s, v4.4s\n"
+    "srshl v25.4s, v25.4s, v2.4s\n"
+    "srshl v20.4s, v20.4s, v2.4s\n"
+    "srshl v24.4s, v24.4s, v2.4s\n"
+    "srshl v19.4s, v19.4s, v2.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "not v16.16b, v17.16b\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smax v23.4s, v23.4s, v16.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smax v31.4s, v31.4s, v16.4s\n"
+    "smin v1.4s, v1.4s, v17.4s\n"
+    "smin v23.4s, v23.4s, v17.4s\n"
+    "smin v0.4s, v0.4s, v17.4s\n"
+    "smin v31.4s, v31.4s, v17.4s\n"
+    "smax v30.4s, v30.4s, v16.4s\n"
+    "smax v22.4s, v22.4s, v16.4s\n"
+    "smax v29.4s, v29.4s, v16.4s\n"
+    "smin v30.4s, v30.4s, v17.4s\n"
+    "smin v22.4s, v22.4s, v17.4s\n"
+    "smin v29.4s, v29.4s, v17.4s\n"
+    "smax v18.4s, v18.4s, v16.4s\n"
+    "smax v28.4s, v28.4s, v16.4s\n"
+    "smax v21.4s, v21.4s, v16.4s\n"
+    "smin v18.4s, v18.4s, v17.4s\n"
+    "smin v28.4s, v28.4s, v17.4s\n"
+    "smin v21.4s, v21.4s, v17.4s\n"
+    "smax v27.4s, v27.4s, v16.4s\n"
+    "smax v26.4s, v26.4s, v16.4s\n"
+    "smax v25.4s, v25.4s, v16.4s\n"
+    "smin v27.4s, v27.4s, v17.4s\n"
+    "smin v26.4s, v26.4s, v17.4s\n"
+    "smin v25.4s, v25.4s, v17.4s\n"
+    "smax v20.4s, v20.4s, v16.4s\n"
+    "smax v24.4s, v24.4s, v16.4s\n"
+    "smax v19.4s, v19.4s, v16.4s\n"
+    "smin v20.4s, v20.4s, v17.4s\n"
+    "smin v24.4s, v24.4s, v17.4s\n"
+    "smin v19.4s, v19.4s, v17.4s\n"
+    "uzp1 v23.16b, v1.16b, v23.16b\n"
+    "uzp1 v16.16b, v0.16b, v31.16b\n"
+    "uzp1 v22.16b, v30.16b, v22.16b\n"
+    "uzp1 v18.16b, v29.16b, v18.16b\n"
+    "uzp1 v21.16b, v28.16b, v21.16b\n"
+    "uzp1 v17.16b, v27.16b, v26.16b\n"
+    "uzp1 v20.16b, v25.16b, v20.16b\n"
+    "uzp1 v19.16b, v24.16b, v19.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x28]\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "add x28, x28, #0x40\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x27]\n"
+    "str q17, [%x[outptr], x26]\n"
+    "str q16, [%x[outptr], x25]\n"
+    "add x27, x27, #0x40\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v3.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v3.16b, v3.16b, v19.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "smax v3.16b, v3.16b, v19.16b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "smax v3.16b, v3.16b, v2.16b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "sxtl v23.8h, v3.8b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "sxtl2 v22.8h, v3.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "movi v17.4s, #0x7f\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "sxtl v1.4s, v23.4h\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "not v16.16b, v17.16b\n"
+    "sxtl2 v23.4s, v23.8h\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "sxtl v0.4s, v22.4h\n"
+    "cmp %x[n_channels], #0x10\n"
+    "sxtl2 v31.4s, v22.8h\n"
+    "srshl v1.4s, v1.4s, v3.4s\n"
+    "srshl v23.4s, v23.4s, v3.4s\n"
+    "srshl v0.4s, v0.4s, v3.4s\n"
+    "srshl v31.4s, v31.4s, v3.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v4.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v4.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+    "srshl v1.4s, v1.4s, v2.4s\n"
+    "srshl v23.4s, v23.4s, v2.4s\n"
+    "srshl v0.4s, v0.4s, v2.4s\n"
+    "srshl v31.4s, v31.4s, v2.4s\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smax v23.4s, v23.4s, v16.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smax v31.4s, v31.4s, v16.4s\n"
+    "smin v1.4s, v1.4s, v17.4s\n"
+    "smin v23.4s, v23.4s, v17.4s\n"
+    "smin v0.4s, v0.4s, v17.4s\n"
+    "smin v31.4s, v31.4s, v17.4s\n"
+    "uzp1 v23.16b, v1.16b, v23.16b\n"
+    "uzp1 v16.16b, v0.16b, v31.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v3.16b, #0x80\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 24f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
+    "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "subs x24, x24, #0x1\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v19.16b, v23.16b, v19.16b\n"
+    "smax v3.16b, v3.16b, v19.16b\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s2, [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h2, [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b2, [x23], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "smax v3.16b, v3.16b, v2.16b\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "sxtl v23.8h, v3.8b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "sxtl2 v22.8h, v3.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "movi v17.4s, #0x7f\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "sxtl v1.4s, v23.4h\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "not v16.16b, v17.16b\n"
+    "sxtl2 v23.4s, v23.8h\n"
+    "sxtl v0.4s, v22.4h\n"
+    "sxtl2 v31.4s, v22.8h\n"
+    "srshl v1.4s, v1.4s, v3.4s\n"
+    "srshl v23.4s, v23.4s, v3.4s\n"
+    "srshl v0.4s, v0.4s, v3.4s\n"
+    "srshl v31.4s, v31.4s, v3.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v4.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v4.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+    "srshl v1.4s, v1.4s, v2.4s\n"
+    "srshl v23.4s, v23.4s, v2.4s\n"
+    "srshl v0.4s, v0.4s, v2.4s\n"
+    "srshl v31.4s, v31.4s, v2.4s\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smax v23.4s, v23.4s, v16.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smax v31.4s, v31.4s, v16.4s\n"
+    "smin v1.4s, v1.4s, v17.4s\n"
+    "smin v23.4s, v23.4s, v17.4s\n"
+    "smin v0.4s, v0.4s, v17.4s\n"
+    "smin v31.4s, v31.4s, v17.4s\n"
+    "uzp1 v23.16b, v1.16b, v23.16b\n"
+    "uzp1 v16.16b, v0.16b, v31.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v16.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v16.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v16.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v16.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v16.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v16.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..9439286
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_u8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+// Kernel descriptor for the generic-window uint8 NHWC average-pooling
+// implementation (depth-first traversal). Exposes the element types,
+// kernel function pointer type and pooling type the framework uses to
+// select and invoke the kernel.
+struct a64_u8_nhwc_avg_generic_depthfirst
+{
+  typedef uint8_t operand_type;  // element type read from the input tensor
+  typedef uint8_t return_type;   // element type written to the output tensor
+
+  // Signature of the kernel entry point: (window_cells, n_valid_cells,
+  // n_channels, input row pointers, output pointer).
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  // Default kernel implementation; a CPUInfo-based override point exists
+  // but this variant always uses the generic implementation.
+  kern_type kernel = a64_u8_nhwc_avg_generic_depthfirst_impl;
+
+  a64_u8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}  // CPUInfo is unused here
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..1d210cb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,630 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  // Fixed-point reciprocal used to average by multiplication instead of
+  // division: the multiplier is a Q31 value (multiplier / 2^31) applied
+  // with SQRDMULH, and `shift` is an additional arithmetic shift applied
+  // with SRSHL (negative => rounding right shift).
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  // Precomputed reciprocals for the common window sizes 2..9;
+  // rescale_params[n - 2] approximates 1/n.
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+// Generic-window average pooling over uint8 NHWC data.
+//
+// window_cells  - total number of cells in the pooling window (defines the
+//                 divisor used for averaging, including padding cells)
+// n_valid_cells - number of valid input pointers in `inptrs`
+// n_channels    - number of channels to process
+// inptrs        - array of n_valid_cells pointers to input rows
+// outptr        - destination for the pooled row
+//
+// Inputs are widened and accumulated into 32-bit sums, then requantized
+// with a fixed-point multiply (SQRDMULH) and a rounding shift (SRSHL),
+// clamped to [0, 255] and narrowed back to uint8.
+void a64_u8_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    // Common window sizes use the precomputed Q31 reciprocal table.
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    // Derive a Q31 multiplier in [0.5, 1) and a compensating right shift
+    // for 1/window_cells.
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      // Rounding can produce exactly 2^31, which overflows int32;
+      // renormalize by halving the multiplier and absorbing the factor
+      // into the shift.
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  // Channel loop structure: 64 channels (4 vectors) per iteration, then a
+  // 16-channel (single-vector) loop, then byte-granular "oddments".
+  // Within each, pairs of input rows are accumulated per iteration with a
+  // single-row cleanup loop for an odd cell count.
+  __asm__ __volatile__(
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n" // cntb _, ALL, #1
+    "mov x24, #0x20\n" // cntb _, ALL, #2
+    "mov x23, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v15.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v14.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v13.4s, #0x0\n"
+    "movi v12.4s, #0x0\n"
+    "movi v11.4s, #0x0\n"
+    "movi v10.4s, #0x0\n"
+    "movi v9.4s, #0x0\n"
+    "movi v8.4s, #0x0\n"
+    "movi v7.4s, #0x0\n"
+    "movi v6.4s, #0x0\n"
+    "movi v5.4s, #0x0\n"
+    "movi v4.4s, #0x0\n"
+    "movi v3.4s, #0x0\n"
+    "movi v2.4s, #0x0\n"
+    "movi v1.4s, #0x0\n"
+    "movi v0.4s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "ldr q24, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "uaddl v21.8h, v29.8b, v28.8b\n"
+    "ldr q31, [x21, x26]\n"
+    "uaddl2 v20.8h, v29.16b, v28.16b\n"
+    "uaddl v19.8h, v27.8b, v26.8b\n"
+    "ldr q30, [x20, x26]\n"
+    "uaddl2 v18.8h, v27.16b, v26.16b\n"
+    "ldr q29, [x21, x25]\n"
+    "uaddl v17.8h, v25.8b, v24.8b\n"
+    "ldr q28, [x20, x25]\n"
+    "uaddl2 v16.8h, v25.16b, v24.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q26, [x20, x24]\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q25, [x21, x23]\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "ldr q24, [x20, x23]\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "uaddw v11.4s, v11.4s, v21.4h\n"
+    "uaddw2 v10.4s, v10.4s, v21.8h\n"
+    "uaddw v9.4s, v9.4s, v20.4h\n"
+    "uaddw2 v8.4s, v8.4s, v20.8h\n"
+    "uaddw v7.4s, v7.4s, v19.4h\n"
+    "uaddw2 v6.4s, v6.4s, v19.8h\n"
+    "uaddw v5.4s, v5.4s, v18.4h\n"
+    "uaddw2 v4.4s, v4.4s, v18.8h\n"
+    "uaddw v3.4s, v3.4s, v17.4h\n"
+    "uaddw2 v2.4s, v2.4s, v17.8h\n"
+    "uaddw v1.4s, v1.4s, v16.4h\n"
+    "uaddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "uaddl v21.8h, v29.8b, v28.8b\n"
+    "uaddl2 v20.8h, v29.16b, v28.16b\n"
+    "uaddl v19.8h, v27.8b, v26.8b\n"
+    "uaddl2 v18.8h, v27.16b, v26.16b\n"
+    "uaddl v17.8h, v25.8b, v24.8b\n"
+    "uaddl2 v16.8h, v25.16b, v24.16b\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "uaddw v11.4s, v11.4s, v21.4h\n"
+    "uaddw2 v10.4s, v10.4s, v21.8h\n"
+    "uaddw v9.4s, v9.4s, v20.4h\n"
+    "uaddw2 v8.4s, v8.4s, v20.8h\n"
+    "uaddw v7.4s, v7.4s, v19.4h\n"
+    "uaddw2 v6.4s, v6.4s, v19.8h\n"
+    "uaddw v5.4s, v5.4s, v18.4h\n"
+    "uaddw2 v4.4s, v4.4s, v18.8h\n"
+    "uaddw v3.4s, v3.4s, v17.4h\n"
+    "uaddw2 v2.4s, v2.4s, v17.8h\n"
+    "uaddw v1.4s, v1.4s, v16.4h\n"
+    "uaddw2 v0.4s, v0.4s, v16.8h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "uxtl v16.8h, v31.8b\n"
+    "ldr q29, [x21, x25]\n"
+    "uxtl2 v22.8h, v31.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "uaddw v15.4s, v15.4s, v16.4h\n"
+    "uaddw2 v14.4s, v14.4s, v16.8h\n"
+    "uxtl v21.8h, v29.8b\n"
+    "uxtl2 v20.8h, v29.16b\n"
+    "uxtl v19.8h, v27.8b\n"
+    "uxtl2 v18.8h, v27.16b\n"
+    "uxtl v17.8h, v25.8b\n"
+    "uxtl2 v16.8h, v25.16b\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "uaddw v11.4s, v11.4s, v21.4h\n"
+    "uaddw2 v10.4s, v10.4s, v21.8h\n"
+    "uaddw v9.4s, v9.4s, v20.4h\n"
+    "uaddw2 v8.4s, v8.4s, v20.8h\n"
+    "uaddw v7.4s, v7.4s, v19.4h\n"
+    "uaddw2 v6.4s, v6.4s, v19.8h\n"
+    "uaddw v5.4s, v5.4s, v18.4h\n"
+    "uaddw2 v4.4s, v4.4s, v18.8h\n"
+    "uaddw v3.4s, v3.4s, v17.4h\n"
+    "uaddw2 v2.4s, v2.4s, v17.8h\n"
+    "uaddw v1.4s, v1.4s, v16.4h\n"
+    "uaddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "movi v19.4s, #0x0\n"
+    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "movi v17.4s, #0xff\n"
+    "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "cmp %x[n_channels], #0x40\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v11.4s, v11.4s, v18.4s\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "srshl v11.4s, v11.4s, v16.4s\n"
+    "sqrdmulh v10.4s, v10.4s, v18.4s\n"
+    "sqrdmulh v9.4s, v9.4s, v18.4s\n"
+    "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+    "sqrdmulh v7.4s, v7.4s, v18.4s\n"
+    "srshl v10.4s, v10.4s, v16.4s\n"
+    "srshl v9.4s, v9.4s, v16.4s\n"
+    "srshl v8.4s, v8.4s, v16.4s\n"
+    "srshl v7.4s, v7.4s, v16.4s\n"
+    "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+    "sqrdmulh v5.4s, v5.4s, v18.4s\n"
+    "sqrdmulh v4.4s, v4.4s, v18.4s\n"
+    "sqrdmulh v3.4s, v3.4s, v18.4s\n"
+    "srshl v6.4s, v6.4s, v16.4s\n"
+    "srshl v5.4s, v5.4s, v16.4s\n"
+    "srshl v4.4s, v4.4s, v16.4s\n"
+    "srshl v3.4s, v3.4s, v16.4s\n"
+    "sqrdmulh v2.4s, v2.4s, v18.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v18.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v18.4s\n"
+    "smax v15.4s, v15.4s, v19.4s\n"
+    "srshl v2.4s, v2.4s, v16.4s\n"
+    "srshl v1.4s, v1.4s, v16.4s\n"
+    "srshl v0.4s, v0.4s, v16.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smax v14.4s, v14.4s, v19.4s\n"
+    "smax v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
+    "smax v11.4s, v11.4s, v19.4s\n"
+    "smax v10.4s, v10.4s, v19.4s\n"
+    "smax v9.4s, v9.4s, v19.4s\n"
+    "smin v11.4s, v11.4s, v17.4s\n"
+    "smin v10.4s, v10.4s, v17.4s\n"
+    "smin v9.4s, v9.4s, v17.4s\n"
+    "smax v8.4s, v8.4s, v19.4s\n"
+    "smax v7.4s, v7.4s, v19.4s\n"
+    "smax v6.4s, v6.4s, v19.4s\n"
+    "smin v8.4s, v8.4s, v17.4s\n"
+    "smin v7.4s, v7.4s, v17.4s\n"
+    "smin v6.4s, v6.4s, v17.4s\n"
+    "smax v5.4s, v5.4s, v19.4s\n"
+    "smax v4.4s, v4.4s, v19.4s\n"
+    "smax v3.4s, v3.4s, v19.4s\n"
+    "smin v5.4s, v5.4s, v17.4s\n"
+    "smin v4.4s, v4.4s, v17.4s\n"
+    "smin v3.4s, v3.4s, v17.4s\n"
+    "smax v2.4s, v2.4s, v19.4s\n"
+    "smax v1.4s, v1.4s, v19.4s\n"
+    "smax v0.4s, v0.4s, v19.4s\n"
+    "smin v2.4s, v2.4s, v17.4s\n"
+    "smin v1.4s, v1.4s, v17.4s\n"
+    "smin v0.4s, v0.4s, v17.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v22.16b, v11.16b, v10.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
+    "uzp1 v17.16b, v5.16b, v4.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "add x26, x26, #0x40\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
+    "str q17, [%x[outptr], x24]\n"
+    "str q16, [%x[outptr], x23]\n"
+    "add x25, x25, #0x40\n"
+    "add x24, x24, #0x40\n"
+    "add x23, x23, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v15.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v14.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v13.4s, #0x0\n"
+    "movi v12.4s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q31, [x21, x26]\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q30, [x20, x26]\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "uxtl v16.8h, v31.8b\n"
+    "uxtl2 v22.8h, v31.16b\n"
+    "uaddw v15.4s, v15.4s, v16.4h\n"
+    "uaddw2 v14.4s, v14.4s, v16.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "movi v19.4s, #0x0\n"
+    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "movi v17.4s, #0xff\n"
+    "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "cmp %x[n_channels], #0x10\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "smax v15.4s, v15.4s, v19.4s\n"
+    "smax v14.4s, v14.4s, v19.4s\n"
+    "smax v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v19.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "add x26, x26, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v15.4s, #0x0\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v13.4s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v12.4s, #0x0\n"
+    "cbz x22, 24f\n"
+    "15:"  // Oddments: 2 inputs loop
+    "movi v31.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "movi v30.16b, #0x0\n"
+    "add x21, x21, x26\n"
+    "add x20, x20, x26\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d31, [x21], #0x8\n"
+    "ldr d30, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "ld1 { v30.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "ld1 { v30.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "ld1 { v30.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "ld1 { v30.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "ld1 { v30.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "ld1 { v30.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "ld1 { v30.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s31, [x21], #0x4\n"
+    "ldr s30, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "ld1 { v30.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "ld1 { v30.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "ld1 { v30.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h31, [x21], #0x2\n"
+    "ldr h30, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "ld1 { v30.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b31, [x21], #0x1\n"
+    "ldr b30, [x20], #0x1\n"
+    "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "subs x22, x22, #0x1\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v31.16b, #0x0\n"
+    "ldr x21, [x19], #0x8\n"
+    "add x21, x21, x26\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d31, [x21], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s31, [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h31, [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b31, [x21], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "uxtl v16.8h, v31.8b\n"
+    "subs x20, x20, #0x1\n"
+    "uxtl2 v22.8h, v31.16b\n"
+    "uaddw v15.4s, v15.4s, v16.4h\n"
+    "uaddw2 v14.4s, v14.4s, v16.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "movi v19.4s, #0x0\n"
+    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "movi v17.4s, #0xff\n"
+    "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "smax v15.4s, v15.4s, v19.4s\n"
+    "smax v14.4s, v14.4s, v19.4s\n"
+    "smax v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v19.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v16.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v16.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v16.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v16.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v16.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v16.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..0103de8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  typedef uint8_t operand_type;
+  typedef uint8_t return_type;
+
+  typedef void (*kern_type)(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..eac1f2d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const uint8_t *const *const inptrs;
+    uint8_t *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const uint8_t *const *input_ptrs,
+      uint8_t *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "ldp x12, x11, [x19, #0x0]\n"
+    "cmp x15, #0x10\n"
+    "ldp x10, x9, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x28, x27, [x19, #0x0]\n"
+    "ldp x26, x25, [x19, #0x10]\n"
+    "ldp x24, x23, [x19, #0x20]\n"
+    "ldp x22, x21, [x19, #0x30]\n"
+    "ldr x20, [x19, #0x40]\n"
+    "blt 3f\n"
+    "lsr x19, x15, #0x4\n"
+    "sub x15, x15, x19, LSL #4\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
+    "ldr q25, [x23, x14]\n"
+    "ldr q24, [x26, x14]\n"
+    "ldr q23, [x22, x14]\n"
+    "ldr q22, [x20, x14]\n"
+    "add x14, x14, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "beq 2f\n"
+    "1:"  // Vector: Loop
+    "umax v21.16b, v30.16b, v29.16b\n"
+    "ldr q30, [x27, x14]\n"
+    "umax v20.16b, v29.16b, v28.16b\n"
+    "ldr q29, [x24, x14]\n"
+    "umax v19.16b, v27.16b, v26.16b\n"
+    "ldr q28, [x21, x14]\n"
+    "umax v18.16b, v25.16b, v24.16b\n"
+    "ldr q26, [x28, x14]\n"
+    "umax v17.16b, v23.16b, v27.16b\n"
+    "ldr q27, [x25, x14]\n"
+    "umax v16.16b, v25.16b, v22.16b\n"
+    "ldr q25, [x23, x14]\n"
+    "umax v19.16b, v21.16b, v19.16b\n"
+    "ldr q24, [x26, x14]\n"
+    "umax v18.16b, v21.16b, v18.16b\n"
+    "ldr q23, [x22, x14]\n"
+    "umax v17.16b, v20.16b, v17.16b\n"
+    "ldr q22, [x20, x14]\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "add x14, x14, #0x10\n"
+    "str q19, [x12, x13]\n"
+    "str q18, [x11, x13]\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "subs x19, x19, #0x1\n"
+    "bgt 1b\n"
+    "2:"  // Vector: Tail
+    "umax v21.16b, v30.16b, v29.16b\n"
+    "umax v20.16b, v29.16b, v28.16b\n"
+    "umax v19.16b, v27.16b, v26.16b\n"
+    "umax v18.16b, v25.16b, v24.16b\n"
+    "umax v17.16b, v23.16b, v27.16b\n"
+    "umax v16.16b, v25.16b, v22.16b\n"
+    "umax v19.16b, v21.16b, v19.16b\n"
+    "str q19, [x12, x13]\n"
+    "umax v18.16b, v21.16b, v18.16b\n"
+    "umax v17.16b, v20.16b, v17.16b\n"
+    "str q18, [x11, x13]\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "str q17, [x10, x13]\n"
+    "str q16, [x9, x13]\n"
+    "add x13, x13, #0x10\n"
+    "cbz x15, 4f\n"
+    "3:"  // Oddments
+    "ldr b30, [x27, x14]\n"
+    "ldr b29, [x24, x14]\n"
+    "umax v21.16b, v30.16b, v29.16b\n"
+    "ldr b28, [x21, x14]\n"
+    "ldr b27, [x25, x14]\n"
+    "umax v20.16b, v29.16b, v28.16b\n"
+    "ldr b26, [x28, x14]\n"
+    "ldr b25, [x23, x14]\n"
+    "umax v19.16b, v27.16b, v26.16b\n"
+    "ldr b24, [x26, x14]\n"
+    "ldr b23, [x22, x14]\n"
+    "umax v19.16b, v21.16b, v19.16b\n"
+    "ldr b22, [x20, x14]\n"
+    "add x14, x14, #0x1\n"
+    "umax v18.16b, v25.16b, v24.16b\n"
+    "subs x15, x15, #0x1\n"
+    "umax v17.16b, v23.16b, v27.16b\n"
+    "str b19, [x12, x13]\n"
+    "umax v16.16b, v25.16b, v22.16b\n"
+    "umax v18.16b, v21.16b, v18.16b\n"
+    "str b18, [x11, x13]\n"
+    "umax v17.16b, v20.16b, v17.16b\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "str b17, [x10, x13]\n"
+    "str b16, [x9, x13]\n"
+    "add x13, x13, #0x1\n"
+    "bgt 3b\n"
+    "4:"  // End
+
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..f018eca
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_u8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+struct a64_u8_nhwc_max_generic_depthfirst
+{
+  typedef uint8_t operand_type;
+  typedef uint8_t return_type;
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = a64_u8_nhwc_max_generic_depthfirst_impl;
+
+  a64_u8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..eacca15
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void a64_u8_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr
+)
+{
+  __asm__ __volatile__(
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v5.16b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v4.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
+    "add x19, x19, #0x20\n"
+    "umax v18.16b, v29.16b, v28.16b\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v21.16b, v27.16b, v21.16b\n"
+    "umax v17.16b, v26.16b, v17.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "umax v20.16b, v25.16b, v20.16b\n"
+    "ldr q0, [x21, x28]\n"
+    "umax v16.16b, v24.16b, v16.16b\n"
+    "ldr q31, [x20, x28]\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "umax v18.16b, v22.16b, v18.16b\n"
+    "ldr q22, [x22, x27]\n"
+    "umax v17.16b, v21.16b, v17.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "ldr q28, [x20, x27]\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "ldr q27, [x23, x26]\n"
+    "umax v5.16b, v5.16b, v18.16b\n"
+    "ldr q21, [x22, x26]\n"
+    "umax v4.16b, v4.16b, v17.16b\n"
+    "ldr q26, [x21, x26]\n"
+    "umax v3.16b, v3.16b, v16.16b\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
+    "umax v18.16b, v29.16b, v28.16b\n"
+    "umax v21.16b, v27.16b, v21.16b\n"
+    "umax v17.16b, v26.16b, v17.16b\n"
+    "umax v20.16b, v25.16b, v20.16b\n"
+    "umax v16.16b, v24.16b, v16.16b\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "umax v18.16b, v22.16b, v18.16b\n"
+    "umax v17.16b, v21.16b, v17.16b\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "umax v5.16b, v5.16b, v18.16b\n"
+    "umax v4.16b, v4.16b, v17.16b\n"
+    "umax v3.16b, v3.16b, v16.16b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v6.16b, v6.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "umax v5.16b, v5.16b, v30.16b\n"
+    "ldr q25, [x23, x25]\n"
+    "umax v4.16b, v4.16b, v27.16b\n"
+    "umax v3.16b, v3.16b, v25.16b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "str q4, [%x[outptr], x26]\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "add x26, x26, #0x40\n"
+    "add x25, x25, #0x40\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "cmp %x[n_channels], #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v6.16b, v6.16b, v2.16b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "cmp %x[n_channels], #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v6.16b, #0x0\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 24f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
+    "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "subs x24, x24, #0x1\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s2, [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h2, [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b2, [x23], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "umax v6.16b, v6.16b, v2.16b\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v6.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..114eacf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_u8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+struct a64_u8q_nhwc_avg_generic_depthfirst
+{
+  typedef uint8_t operand_type;
+  typedef uint8_t return_type;
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = a64_u8q_nhwc_avg_generic_depthfirst_impl;
+
+  a64_u8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..e2cb9d7
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,712 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  // Fixed-point representation of a reciprocal 1/N used to turn an integer
+  // sum into an average: value = (multiplier / 2^31) * 2^shift, applied with
+  // a saturating rounding doubling high multiply (SQRDMULH) followed by a
+  // rounding shift (SRSHL).
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  // Precomputed normalised reciprocals for window sizes 2..9 (index is
+  // window_cells - 2); larger windows are computed at runtime.
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+// Generic-window average pooling for quantized uint8 NHWC data with
+// requantization, AArch64 NEON implementation.
+//
+// Sums `n_valid_cells` input rows (pointers in `inptrs`) per output point,
+// folds the 1/window_cells averaging factor into the requantization
+// multiplier, then rescales, adds the output offset, clamps to [0, 255] and
+// narrows back to uint8. Channels are processed 64, then 16, then <16
+// ("oddments") at a time.
+void a64_u8q_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    // Small windows: use the precomputed Q31 multiplier/shift table.
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    // Larger windows: derive a normalised Q31 multiplier at runtime so that
+    // 0.5 <= f_rescale_value < 1.0, tracking the power of two in shift_value.
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      // Rounding carried into 2^31; renormalise so the multiplier stays
+      // representable as a positive int32.
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+
+  // Initialise the accumulators such that the offsets are subtracted for all
+  // valid inputs.
+  const int32_t accumulator_init = -qp.input_offset * n_valid_cells;
+
+  // Combine together the rescale value for the requantization and the scaling
+  // factor for the average pool.
+  const int32_t shift = qp.per_layer_left_shift - qp.per_layer_right_shift + shift_value;
+  const int32_t left_shift = shift > 0 ? shift : 0;
+  const int32_t right_shift = shift <= 0 ? shift : 0;
+
+  int32_t combined_rescale_value = 0;
+  // Fuse the per-layer requantization multiplier with the pooling reciprocal
+  // using SQRDMULH so the rounding matches the vector path below.
+  __asm__ __volatile__ (
+      "mov v16.s[0], %w[per_layer_mul]\n"
+      "mov v17.s[0], %w[rescale_value]\n"
+      "sqrdmulh s18, s16, s17\n"
+      "mov %w[combined_rescale_value], v18.s[0]\n"
+    : [combined_rescale_value] "=r" (combined_rescale_value)
+    : [per_layer_mul] "r" (qp.per_layer_mul), [rescale_value] "r" (rescale_value)
+    : "v16", "v17", "v18"
+  );
+
+  // Main kernel: widen uint8 -> uint16 -> int32 while accumulating pairs of
+  // inputs, then rescale (SRSHL left, SQRDMULH, SRSHL right), add the output
+  // offset, clamp to [0, 0xff] and narrow with UZP1 back to bytes.
+  __asm__ __volatile__(
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n" // cntb _, ALL, #1
+    "mov x24, #0x20\n" // cntb _, ALL, #2
+    "mov x23, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "ld1r { v15.4s }, [%x[accumulator_init]]\n"
+    "mov v14.16b, v15.16b\n"
+    "mov x19, %x[inptrs]\n"
+    "mov v13.16b, v15.16b\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov v12.16b, v15.16b\n"
+    "mov v11.16b, v15.16b\n"
+    "mov v10.16b, v15.16b\n"
+    "mov v9.16b, v15.16b\n"
+    "mov v8.16b, v15.16b\n"
+    "mov v7.16b, v15.16b\n"
+    "mov v6.16b, v15.16b\n"
+    "mov v5.16b, v15.16b\n"
+    "mov v4.16b, v15.16b\n"
+    "mov v3.16b, v15.16b\n"
+    "mov v2.16b, v15.16b\n"
+    "mov v1.16b, v15.16b\n"
+    "mov v0.16b, v15.16b\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "ldr q24, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "uaddl v21.8h, v29.8b, v28.8b\n"
+    "ldr q31, [x21, x26]\n"
+    "uaddl2 v20.8h, v29.16b, v28.16b\n"
+    "uaddl v19.8h, v27.8b, v26.8b\n"
+    "ldr q30, [x20, x26]\n"
+    "uaddl2 v18.8h, v27.16b, v26.16b\n"
+    "ldr q29, [x21, x25]\n"
+    "uaddl v17.8h, v25.8b, v24.8b\n"
+    "ldr q28, [x20, x25]\n"
+    "uaddl2 v16.8h, v25.16b, v24.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q26, [x20, x24]\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q25, [x21, x23]\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "ldr q24, [x20, x23]\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "uaddw v11.4s, v11.4s, v21.4h\n"
+    "uaddw2 v10.4s, v10.4s, v21.8h\n"
+    "uaddw v9.4s, v9.4s, v20.4h\n"
+    "uaddw2 v8.4s, v8.4s, v20.8h\n"
+    "uaddw v7.4s, v7.4s, v19.4h\n"
+    "uaddw2 v6.4s, v6.4s, v19.8h\n"
+    "uaddw v5.4s, v5.4s, v18.4h\n"
+    "uaddw2 v4.4s, v4.4s, v18.8h\n"
+    "uaddw v3.4s, v3.4s, v17.4h\n"
+    "uaddw2 v2.4s, v2.4s, v17.8h\n"
+    "uaddw v1.4s, v1.4s, v16.4h\n"
+    "uaddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "uaddl v21.8h, v29.8b, v28.8b\n"
+    "uaddl2 v20.8h, v29.16b, v28.16b\n"
+    "uaddl v19.8h, v27.8b, v26.8b\n"
+    "uaddl2 v18.8h, v27.16b, v26.16b\n"
+    "uaddl v17.8h, v25.8b, v24.8b\n"
+    "uaddl2 v16.8h, v25.16b, v24.16b\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "uaddw v11.4s, v11.4s, v21.4h\n"
+    "uaddw2 v10.4s, v10.4s, v21.8h\n"
+    "uaddw v9.4s, v9.4s, v20.4h\n"
+    "uaddw2 v8.4s, v8.4s, v20.8h\n"
+    "uaddw v7.4s, v7.4s, v19.4h\n"
+    "uaddw2 v6.4s, v6.4s, v19.8h\n"
+    "uaddw v5.4s, v5.4s, v18.4h\n"
+    "uaddw2 v4.4s, v4.4s, v18.8h\n"
+    "uaddw v3.4s, v3.4s, v17.4h\n"
+    "uaddw2 v2.4s, v2.4s, v17.8h\n"
+    "uaddw v1.4s, v1.4s, v16.4h\n"
+    "uaddw2 v0.4s, v0.4s, v16.8h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "uxtl v16.8h, v31.8b\n"
+    "ldr q29, [x21, x25]\n"
+    "uxtl2 v22.8h, v31.16b\n"
+    "ldr q27, [x21, x24]\n"
+    "ldr q25, [x21, x23]\n"
+    "uaddw v15.4s, v15.4s, v16.4h\n"
+    "uaddw2 v14.4s, v14.4s, v16.8h\n"
+    "uxtl v21.8h, v29.8b\n"
+    "uxtl2 v20.8h, v29.16b\n"
+    "uxtl v19.8h, v27.8b\n"
+    "uxtl2 v18.8h, v27.16b\n"
+    "uxtl v17.8h, v25.8b\n"
+    "uxtl2 v16.8h, v25.16b\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "uaddw v11.4s, v11.4s, v21.4h\n"
+    "uaddw2 v10.4s, v10.4s, v21.8h\n"
+    "uaddw v9.4s, v9.4s, v20.4h\n"
+    "uaddw2 v8.4s, v8.4s, v20.8h\n"
+    "uaddw v7.4s, v7.4s, v19.4h\n"
+    "uaddw2 v6.4s, v6.4s, v19.8h\n"
+    "uaddw v5.4s, v5.4s, v18.4h\n"
+    "uaddw2 v4.4s, v4.4s, v18.8h\n"
+    "uaddw v3.4s, v3.4s, v17.4h\n"
+    "uaddw2 v2.4s, v2.4s, v17.8h\n"
+    "uaddw v1.4s, v1.4s, v16.4h\n"
+    "uaddw2 v0.4s, v0.4s, v16.8h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "movi v21.4s, #0x0\n"
+    "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "movi v19.4s, #0xff\n"
+    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[right_shift]]\n"
+    "cmp %x[n_channels], #0x40\n"
+    "srshl v14.4s, v14.4s, v18.4s\n"
+    "ld1r { v16.4s }, [x19]\n"
+    "srshl v13.4s, v13.4s, v18.4s\n"
+    "srshl v12.4s, v12.4s, v18.4s\n"
+    "srshl v11.4s, v11.4s, v18.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v20.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v20.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v20.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "sqrdmulh v11.4s, v11.4s, v20.4s\n"
+    "srshl v10.4s, v10.4s, v18.4s\n"
+    "srshl v9.4s, v9.4s, v18.4s\n"
+    "srshl v8.4s, v8.4s, v18.4s\n"
+    "srshl v11.4s, v11.4s, v17.4s\n"
+    "sqrdmulh v10.4s, v10.4s, v20.4s\n"
+    "sqrdmulh v9.4s, v9.4s, v20.4s\n"
+    "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+    "srshl v7.4s, v7.4s, v18.4s\n"
+    "srshl v10.4s, v10.4s, v17.4s\n"
+    "srshl v9.4s, v9.4s, v17.4s\n"
+    "srshl v8.4s, v8.4s, v17.4s\n"
+    "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+    "srshl v6.4s, v6.4s, v18.4s\n"
+    "srshl v5.4s, v5.4s, v18.4s\n"
+    "srshl v4.4s, v4.4s, v18.4s\n"
+    "srshl v7.4s, v7.4s, v17.4s\n"
+    "sqrdmulh v6.4s, v6.4s, v20.4s\n"
+    "sqrdmulh v5.4s, v5.4s, v20.4s\n"
+    "sqrdmulh v4.4s, v4.4s, v20.4s\n"
+    "srshl v3.4s, v3.4s, v18.4s\n"
+    "srshl v6.4s, v6.4s, v17.4s\n"
+    "srshl v5.4s, v5.4s, v17.4s\n"
+    "srshl v4.4s, v4.4s, v17.4s\n"
+    "sqrdmulh v3.4s, v3.4s, v20.4s\n"
+    "srshl v2.4s, v2.4s, v18.4s\n"
+    "srshl v1.4s, v1.4s, v18.4s\n"
+    "srshl v0.4s, v0.4s, v18.4s\n"
+    "srshl v3.4s, v3.4s, v17.4s\n"
+    "sqrdmulh v2.4s, v2.4s, v20.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v20.4s\n"
+    "add v15.4s, v15.4s, v16.4s\n"
+    "srshl v2.4s, v2.4s, v17.4s\n"
+    "srshl v1.4s, v1.4s, v17.4s\n"
+    "srshl v0.4s, v0.4s, v17.4s\n"
+    "add v14.4s, v14.4s, v16.4s\n"
+    "add v13.4s, v13.4s, v16.4s\n"
+    "add v12.4s, v12.4s, v16.4s\n"
+    "add v11.4s, v11.4s, v16.4s\n"
+    "add v10.4s, v10.4s, v16.4s\n"
+    "add v9.4s, v9.4s, v16.4s\n"
+    "add v8.4s, v8.4s, v16.4s\n"
+    "add v7.4s, v7.4s, v16.4s\n"
+    "add v6.4s, v6.4s, v16.4s\n"
+    "add v5.4s, v5.4s, v16.4s\n"
+    "add v4.4s, v4.4s, v16.4s\n"
+    "add v3.4s, v3.4s, v16.4s\n"
+    "add v2.4s, v2.4s, v16.4s\n"
+    "add v1.4s, v1.4s, v16.4s\n"
+    "add v0.4s, v0.4s, v16.4s\n"
+    "smax v15.4s, v15.4s, v21.4s\n"
+    "smax v14.4s, v14.4s, v21.4s\n"
+    "smax v13.4s, v13.4s, v21.4s\n"
+    "smin v15.4s, v15.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v19.4s\n"
+    "smin v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v21.4s\n"
+    "smax v11.4s, v11.4s, v21.4s\n"
+    "smax v10.4s, v10.4s, v21.4s\n"
+    "smin v12.4s, v12.4s, v19.4s\n"
+    "smin v11.4s, v11.4s, v19.4s\n"
+    "smin v10.4s, v10.4s, v19.4s\n"
+    "smax v9.4s, v9.4s, v21.4s\n"
+    "smax v8.4s, v8.4s, v21.4s\n"
+    "smax v7.4s, v7.4s, v21.4s\n"
+    "smin v9.4s, v9.4s, v19.4s\n"
+    "smin v8.4s, v8.4s, v19.4s\n"
+    "smin v7.4s, v7.4s, v19.4s\n"
+    "smax v6.4s, v6.4s, v21.4s\n"
+    "smax v5.4s, v5.4s, v21.4s\n"
+    "smax v4.4s, v4.4s, v21.4s\n"
+    "smin v6.4s, v6.4s, v19.4s\n"
+    "smin v5.4s, v5.4s, v19.4s\n"
+    "smin v4.4s, v4.4s, v19.4s\n"
+    "smax v3.4s, v3.4s, v21.4s\n"
+    "smax v2.4s, v2.4s, v21.4s\n"
+    "smax v1.4s, v1.4s, v21.4s\n"
+    "smin v3.4s, v3.4s, v19.4s\n"
+    "smin v2.4s, v2.4s, v19.4s\n"
+    "smin v1.4s, v1.4s, v19.4s\n"
+    "smax v0.4s, v0.4s, v21.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "smin v0.4s, v0.4s, v19.4s\n"
+    "uzp1 v22.16b, v11.16b, v10.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
+    "uzp1 v17.16b, v5.16b, v4.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "add x26, x26, #0x40\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
+    "str q17, [%x[outptr], x24]\n"
+    "str q16, [%x[outptr], x23]\n"
+    "add x25, x25, #0x40\n"
+    "add x24, x24, #0x40\n"
+    "add x23, x23, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "ld1r { v15.4s }, [%x[accumulator_init]]\n"
+    "mov v14.16b, v15.16b\n"
+    "mov x19, %x[inptrs]\n"
+    "mov v13.16b, v15.16b\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov v12.16b, v15.16b\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "subs x22, x22, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "ldr q31, [x21, x26]\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "ldr q30, [x20, x26]\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q31, [x21, x26]\n"
+    "uxtl v16.8h, v31.8b\n"
+    "uxtl2 v22.8h, v31.16b\n"
+    "uaddw v15.4s, v15.4s, v16.4h\n"
+    "uaddw2 v14.4s, v14.4s, v16.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "movi v21.4s, #0x0\n"
+    "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "movi v19.4s, #0xff\n"
+    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v17.4s }, [%x[right_shift]]\n"
+    "cmp %x[n_channels], #0x10\n"
+    "srshl v14.4s, v14.4s, v18.4s\n"
+    "ld1r { v16.4s }, [x19]\n"
+    "srshl v13.4s, v13.4s, v18.4s\n"
+    "srshl v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v20.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v20.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v20.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "add v15.4s, v15.4s, v16.4s\n"
+    "add v14.4s, v14.4s, v16.4s\n"
+    "add v13.4s, v13.4s, v16.4s\n"
+    "add v12.4s, v12.4s, v16.4s\n"
+    "smax v15.4s, v15.4s, v21.4s\n"
+    "smax v14.4s, v14.4s, v21.4s\n"
+    "smax v13.4s, v13.4s, v21.4s\n"
+    "smin v15.4s, v15.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v19.4s\n"
+    "smin v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v21.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "smin v12.4s, v12.4s, v19.4s\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "add x26, x26, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "ld1r { v15.4s }, [%x[accumulator_init]]\n"
+    "mov v14.16b, v15.16b\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "mov v13.16b, v15.16b\n"
+    "mov x19, %x[inptrs]\n"
+    "mov v12.16b, v15.16b\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "cbz x22, 24f\n"
+    "15:"  // Oddments: 2 inputs loop
+    "movi v31.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    "movi v30.16b, #0x0\n"
+    "add x21, x21, x26\n"
+    "add x20, x20, x26\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d31, [x21], #0x8\n"
+    "ldr d30, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "ld1 { v30.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "ld1 { v30.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "ld1 { v30.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "ld1 { v30.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "ld1 { v30.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "ld1 { v30.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "ld1 { v30.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s31, [x21], #0x4\n"
+    "ldr s30, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "ld1 { v30.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "ld1 { v30.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "ld1 { v30.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h31, [x21], #0x2\n"
+    "ldr h30, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "ld1 { v30.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b31, [x21], #0x1\n"
+    "ldr b30, [x20], #0x1\n"
+    "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
+    "uaddl v23.8h, v31.8b, v30.8b\n"
+    "subs x22, x22, #0x1\n"
+    "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "uaddw2 v14.4s, v14.4s, v23.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v31.16b, #0x0\n"
+    "ldr x21, [x19], #0x8\n"
+    "add x21, x21, x26\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d31, [x21], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v31.s }[2], [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v31.h }[6], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[14], [x21], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[12], [x21], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v31.h }[4], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[10], [x21], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[8], [x21], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s31, [x21], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v31.h }[2], [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[6], [x21], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[4], [x21], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h31, [x21], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v31.b }[2], [x21], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b31, [x21], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "uxtl v16.8h, v31.8b\n"
+    "subs x20, x20, #0x1\n"
+    "uxtl2 v22.8h, v31.16b\n"
+    "uaddw v15.4s, v15.4s, v16.4h\n"
+    "uaddw2 v14.4s, v14.4s, v16.8h\n"
+    "uaddw v13.4s, v13.4s, v22.4h\n"
+    "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "movi v21.4s, #0x0\n"
+    "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "movi v19.4s, #0xff\n"
+    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "ld1r { v17.4s }, [%x[right_shift]]\n"
+    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v16.4s }, [x19]\n"
+    "srshl v14.4s, v14.4s, v18.4s\n"
+    "srshl v13.4s, v13.4s, v18.4s\n"
+    "srshl v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v20.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v20.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v20.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+    "srshl v15.4s, v15.4s, v17.4s\n"
+    "srshl v14.4s, v14.4s, v17.4s\n"
+    "srshl v13.4s, v13.4s, v17.4s\n"
+    "srshl v12.4s, v12.4s, v17.4s\n"
+    "add v15.4s, v15.4s, v16.4s\n"
+    "add v14.4s, v14.4s, v16.4s\n"
+    "add v13.4s, v13.4s, v16.4s\n"
+    "add v12.4s, v12.4s, v16.4s\n"
+    "smax v15.4s, v15.4s, v21.4s\n"
+    "smax v14.4s, v14.4s, v21.4s\n"
+    "smax v13.4s, v13.4s, v21.4s\n"
+    "smin v15.4s, v15.4s, v19.4s\n"
+    "smin v14.4s, v14.4s, v19.4s\n"
+    "smin v13.4s, v13.4s, v19.4s\n"
+    "smax v12.4s, v12.4s, v21.4s\n"
+    "uzp1 v23.16b, v15.16b, v14.16b\n"
+    "smin v12.4s, v12.4s, v19.4s\n"
+    "uzp1 v16.16b, v13.16b, v12.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v16.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v16.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v16.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v16.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v16.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v16.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..166f3fa
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+void a64_u8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+// Kernel descriptor for generic-window max pooling over quantized uint8
+// (u8q) NHWC data, depth-first traversal, AArch64. Mirrors the avg-pool
+// descriptor above but selects PoolingType::MAX.
+struct a64_u8q_nhwc_max_generic_depthfirst
+{
+  typedef uint8_t operand_type;  // element type read from the input tensor
+  typedef uint8_t return_type;   // element type written to the output tensor
+
+  // Entry-point signature; the first (window_cells) argument is unused by
+  // the max-pool implementation, hence unnamed.
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = a64_u8q_nhwc_max_generic_depthfirst_impl;
+
+  // CPUInfo is unused: this kernel has no CPU-specific specialisation.
+  a64_u8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..b056be2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+
+// Generic-window max pooling over quantized (uint8) NHWC data for a single
+// output point, followed by requantization of the result.
+//
+// Parameters:
+//   (unnamed)     - unused by this kernel.
+//   n_valid_cells - number of valid input-cell pointers in `inptrs`; the
+//                   kernel consumes them four at a time, then one at a time.
+//   n_channels    - number of channels to reduce for this output point.
+//   inptrs        - array of `n_valid_cells` pointers into the input tensor.
+//   outptr        - destination for the requantized per-channel maxima.
+//   qp            - requantization parameters: input/output offsets,
+//                   per-layer multiplier and left/right shifts.
+void a64_u8q_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  // Structure of the assembly below:
+  //   - x28/x27/x26/x25 hold the byte offsets (0x00/0x10/0x20/0x30) of the
+  //     four 16-byte channel vectors handled per pass; the main loop thus
+  //     processes 64 channels at a time (label 1), then a 16-channel loop
+  //     (label 8), then an "oddments" tail for fewer than 16 channels
+  //     (label 14) using element-wise tbz-guarded loads/stores.
+  //   - The reduction uses UMAX on 16 uint8 lanes; requantization widens to
+  //     int32, subtracts the input offset, applies SRSHL/SQRDMULH/SRSHL with
+  //     the per-layer shifts and multiplier, adds the output offset, clamps
+  //     to [0, 255] and narrows back to uint8 with UZP1.
+  __asm__ __volatile__(
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n" // cntb _, ALL, #1
+    "mov x26, #0x20\n" // cntb _, ALL, #2
+    "mov x25, #0x30\n" // cntb _, ALL, #3
+    "cmp %x[n_channels], #0x40\n"
+    "blt 7f\n"
+    "1:"  // 4-vectors of channels
+    "movi v4.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "movi v3.16b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v7.16b, #0x0\n"
+    "movi v6.16b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "ldr q21, [x22, x26]\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
+    "add x19, x19, #0x20\n"
+    "umax v18.16b, v29.16b, v28.16b\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v21.16b, v27.16b, v21.16b\n"
+    "umax v17.16b, v26.16b, v17.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "umax v20.16b, v25.16b, v20.16b\n"
+    "ldr q0, [x21, x28]\n"
+    "umax v16.16b, v24.16b, v16.16b\n"
+    "ldr q31, [x20, x28]\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "umax v18.16b, v22.16b, v18.16b\n"
+    "ldr q22, [x22, x27]\n"
+    "umax v17.16b, v21.16b, v17.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "ldr q28, [x20, x27]\n"
+    "umax v4.16b, v4.16b, v19.16b\n"
+    "ldr q27, [x23, x26]\n"
+    "umax v3.16b, v3.16b, v18.16b\n"
+    "ldr q21, [x22, x26]\n"
+    "umax v7.16b, v7.16b, v17.16b\n"
+    "ldr q26, [x21, x26]\n"
+    "umax v6.16b, v6.16b, v16.16b\n"
+    "ldr q17, [x20, x26]\n"
+    "ldr q25, [x23, x25]\n"
+    "ldr q20, [x22, x25]\n"
+    "ldr q24, [x21, x25]\n"
+    "ldr q16, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
+    "umax v18.16b, v29.16b, v28.16b\n"
+    "umax v21.16b, v27.16b, v21.16b\n"
+    "umax v17.16b, v26.16b, v17.16b\n"
+    "umax v20.16b, v25.16b, v20.16b\n"
+    "umax v16.16b, v24.16b, v16.16b\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "umax v18.16b, v22.16b, v18.16b\n"
+    "umax v17.16b, v21.16b, v17.16b\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "umax v4.16b, v4.16b, v19.16b\n"
+    "umax v3.16b, v3.16b, v18.16b\n"
+    "umax v7.16b, v7.16b, v17.16b\n"
+    "umax v6.16b, v6.16b, v16.16b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v4.16b, v4.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q27, [x23, x26]\n"
+    "umax v3.16b, v3.16b, v30.16b\n"
+    "ldr q25, [x23, x25]\n"
+    "umax v7.16b, v7.16b, v27.16b\n"
+    "umax v6.16b, v6.16b, v25.16b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "uxtl v17.8h, v4.8b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1r { v5.4s }, [x19]\n"
+    "uxtl2 v16.8h, v4.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "uxtl v21.8h, v3.8b\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "uxtl2 v20.8h, v3.16b\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "uxtl v19.8h, v7.8b\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "uxtl2 v24.8h, v7.16b\n"
+    "ld1r { v1.4s }, [x19]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "uxtl v0.8h, v6.8b\n"
+    "cmp %x[n_channels], #0x40\n"
+    "uxtl2 v31.8h, v6.16b\n"
+    "neg v5.4s, v5.4s\n"
+    "movi v30.4s, #0x0\n"
+    "movi v29.4s, #0xff\n"
+    "saddw v23.4s, v5.4s, v17.4h\n"
+    "saddw2 v18.4s, v5.4s, v17.8h\n"
+    "saddw v17.4s, v5.4s, v16.4h\n"
+    "saddw2 v16.4s, v5.4s, v16.8h\n"
+    "saddw v22.4s, v5.4s, v21.4h\n"
+    "saddw2 v21.4s, v5.4s, v21.8h\n"
+    "saddw v28.4s, v5.4s, v20.4h\n"
+    "saddw2 v20.4s, v5.4s, v20.8h\n"
+    "saddw v27.4s, v5.4s, v19.4h\n"
+    "saddw2 v19.4s, v5.4s, v19.8h\n"
+    "srshl v23.4s, v23.4s, v3.4s\n"
+    "srshl v18.4s, v18.4s, v3.4s\n"
+    "srshl v17.4s, v17.4s, v3.4s\n"
+    "srshl v16.4s, v16.4s, v3.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
+    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+    "srshl v23.4s, v23.4s, v2.4s\n"
+    "srshl v18.4s, v18.4s, v2.4s\n"
+    "srshl v17.4s, v17.4s, v2.4s\n"
+    "srshl v16.4s, v16.4s, v2.4s\n"
+    "srshl v22.4s, v22.4s, v3.4s\n"
+    "srshl v21.4s, v21.4s, v3.4s\n"
+    "srshl v28.4s, v28.4s, v3.4s\n"
+    "srshl v20.4s, v20.4s, v3.4s\n"
+    "sqrdmulh v22.4s, v22.4s, v4.4s\n"
+    "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+    "sqrdmulh v28.4s, v28.4s, v4.4s\n"
+    "sqrdmulh v20.4s, v20.4s, v4.4s\n"
+    "srshl v22.4s, v22.4s, v2.4s\n"
+    "srshl v21.4s, v21.4s, v2.4s\n"
+    "srshl v28.4s, v28.4s, v2.4s\n"
+    "srshl v20.4s, v20.4s, v2.4s\n"
+    "srshl v27.4s, v27.4s, v3.4s\n"
+    "srshl v19.4s, v19.4s, v3.4s\n"
+    "add v23.4s, v23.4s, v1.4s\n"
+    "add v18.4s, v18.4s, v1.4s\n"
+    "sqrdmulh v27.4s, v27.4s, v4.4s\n"
+    "sqrdmulh v19.4s, v19.4s, v4.4s\n"
+    "add v17.4s, v17.4s, v1.4s\n"
+    "add v16.4s, v16.4s, v1.4s\n"
+    "srshl v27.4s, v27.4s, v2.4s\n"
+    "srshl v19.4s, v19.4s, v2.4s\n"
+    "add v22.4s, v22.4s, v1.4s\n"
+    "add v21.4s, v21.4s, v1.4s\n"
+    "add v28.4s, v28.4s, v1.4s\n"
+    "add v20.4s, v20.4s, v1.4s\n"
+    "add v27.4s, v27.4s, v1.4s\n"
+    "add v19.4s, v19.4s, v1.4s\n"
+    "smax v23.4s, v23.4s, v30.4s\n"
+    "smax v18.4s, v18.4s, v30.4s\n"
+    "smax v17.4s, v17.4s, v30.4s\n"
+    "smin v23.4s, v23.4s, v29.4s\n"
+    "smin v18.4s, v18.4s, v29.4s\n"
+    "smin v17.4s, v17.4s, v29.4s\n"
+    "smax v16.4s, v16.4s, v30.4s\n"
+    "smax v22.4s, v22.4s, v30.4s\n"
+    "smax v21.4s, v21.4s, v30.4s\n"
+    "smin v16.4s, v16.4s, v29.4s\n"
+    "smin v22.4s, v22.4s, v29.4s\n"
+    "smin v21.4s, v21.4s, v29.4s\n"
+    "smax v28.4s, v28.4s, v30.4s\n"
+    "smax v20.4s, v20.4s, v30.4s\n"
+    "smax v27.4s, v27.4s, v30.4s\n"
+    "smin v28.4s, v28.4s, v29.4s\n"
+    "smin v20.4s, v20.4s, v29.4s\n"
+    "smin v27.4s, v27.4s, v29.4s\n"
+    "smax v19.4s, v19.4s, v30.4s\n"
+    "uzp1 v26.16b, v23.16b, v18.16b\n"
+    "saddw v25.4s, v5.4s, v24.4h\n"
+    "saddw2 v18.4s, v5.4s, v24.8h\n"
+    "smin v19.4s, v19.4s, v29.4s\n"
+    "srshl v25.4s, v25.4s, v3.4s\n"
+    "srshl v18.4s, v18.4s, v3.4s\n"
+    "uzp1 v24.16b, v17.16b, v16.16b\n"
+    "saddw v17.4s, v5.4s, v0.4h\n"
+    "saddw2 v16.4s, v5.4s, v0.8h\n"
+    "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+    "srshl v17.4s, v17.4s, v3.4s\n"
+    "srshl v16.4s, v16.4s, v3.4s\n"
+    "srshl v25.4s, v25.4s, v2.4s\n"
+    "srshl v18.4s, v18.4s, v2.4s\n"
+    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
+    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+    "add v25.4s, v25.4s, v1.4s\n"
+    "add v18.4s, v18.4s, v1.4s\n"
+    "srshl v17.4s, v17.4s, v2.4s\n"
+    "srshl v16.4s, v16.4s, v2.4s\n"
+    "smax v25.4s, v25.4s, v30.4s\n"
+    "smax v18.4s, v18.4s, v30.4s\n"
+    "add v17.4s, v17.4s, v1.4s\n"
+    "add v16.4s, v16.4s, v1.4s\n"
+    "smin v25.4s, v25.4s, v29.4s\n"
+    "smin v18.4s, v18.4s, v29.4s\n"
+    "smax v17.4s, v17.4s, v30.4s\n"
+    "smax v16.4s, v16.4s, v30.4s\n"
+    "uzp1 v23.16b, v22.16b, v21.16b\n"
+    "saddw v22.4s, v5.4s, v31.4h\n"
+    "saddw2 v21.4s, v5.4s, v31.8h\n"
+    "smin v17.4s, v17.4s, v29.4s\n"
+    "srshl v22.4s, v22.4s, v3.4s\n"
+    "srshl v21.4s, v21.4s, v3.4s\n"
+    "smin v16.4s, v16.4s, v29.4s\n"
+    "uzp1 v20.16b, v28.16b, v20.16b\n"
+    "sqrdmulh v22.4s, v22.4s, v4.4s\n"
+    "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+    "uzp1 v19.16b, v27.16b, v19.16b\n"
+    "uzp1 v18.16b, v25.16b, v18.16b\n"
+    "srshl v22.4s, v22.4s, v2.4s\n"
+    "srshl v21.4s, v21.4s, v2.4s\n"
+    "uzp1 v17.16b, v17.16b, v16.16b\n"
+    "uzp1 v16.16b, v26.16b, v24.16b\n"
+    "str q16, [%x[outptr], x28]\n"
+    "add v22.4s, v22.4s, v1.4s\n"
+    "add v21.4s, v21.4s, v1.4s\n"
+    "add x28, x28, #0x40\n"
+    "uzp1 v16.16b, v23.16b, v20.16b\n"
+    "str q16, [%x[outptr], x27]\n"
+    "smax v22.4s, v22.4s, v30.4s\n"
+    "smax v21.4s, v21.4s, v30.4s\n"
+    "add x27, x27, #0x40\n"
+    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "str q16, [%x[outptr], x26]\n"
+    "smin v22.4s, v22.4s, v29.4s\n"
+    "smin v21.4s, v21.4s, v29.4s\n"
+    "add x26, x26, #0x40\n"
+    "uzp1 v16.16b, v22.16b, v21.16b\n"
+    "uzp1 v16.16b, v17.16b, v16.16b\n"
+    "str q16, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
+    "bge 1b\n"
+    "cbz %x[n_channels], 43f\n"
+    "7:"  // Single vector of channels
+    "cmp %x[n_channels], #0x10\n"
+    "blt 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "movi v4.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v4.16b, v4.16b, v19.16b\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "umax v4.16b, v4.16b, v19.16b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ldr q2, [x23, x28]\n"
+    "umax v4.16b, v4.16b, v2.16b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "uxtl v17.8h, v4.8b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1r { v5.4s }, [x19]\n"
+    "uxtl2 v16.8h, v4.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "movi v30.4s, #0x0\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "movi v29.4s, #0xff\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "neg v5.4s, v5.4s\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "saddw v23.4s, v5.4s, v17.4h\n"
+    "ld1r { v1.4s }, [x19]\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "saddw2 v18.4s, v5.4s, v17.8h\n"
+    "cmp %x[n_channels], #0x10\n"
+    "saddw v17.4s, v5.4s, v16.4h\n"
+    "saddw2 v16.4s, v5.4s, v16.8h\n"
+    "srshl v23.4s, v23.4s, v3.4s\n"
+    "srshl v18.4s, v18.4s, v3.4s\n"
+    "srshl v17.4s, v17.4s, v3.4s\n"
+    "srshl v16.4s, v16.4s, v3.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
+    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+    "srshl v23.4s, v23.4s, v2.4s\n"
+    "srshl v18.4s, v18.4s, v2.4s\n"
+    "srshl v17.4s, v17.4s, v2.4s\n"
+    "srshl v16.4s, v16.4s, v2.4s\n"
+    "add v23.4s, v23.4s, v1.4s\n"
+    "add v18.4s, v18.4s, v1.4s\n"
+    "add v17.4s, v17.4s, v1.4s\n"
+    "add v16.4s, v16.4s, v1.4s\n"
+    "smax v23.4s, v23.4s, v30.4s\n"
+    "smax v18.4s, v18.4s, v30.4s\n"
+    "smax v17.4s, v17.4s, v30.4s\n"
+    "smin v23.4s, v23.4s, v29.4s\n"
+    "smin v18.4s, v18.4s, v29.4s\n"
+    "smin v17.4s, v17.4s, v29.4s\n"
+    "smax v16.4s, v16.4s, v30.4s\n"
+    "uzp1 v26.16b, v23.16b, v18.16b\n"
+    "smin v16.4s, v16.4s, v29.4s\n"
+    "uzp1 v24.16b, v17.16b, v16.16b\n"
+    "uzp1 v16.16b, v26.16b, v24.16b\n"
+    "str q16, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
+    "bge 8b\n"
+    "cbz %x[n_channels], 43f\n"
+    "14:"  // Oddments
+    "movi v4.16b, #0x0\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 24f\n"
+    "15:"  // Oddments: 4 inputs loop
+    "movi v2.16b, #0x0\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "add x23, x23, x28\n"
+    "movi v1.16b, #0x0\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movi v0.16b, #0x0\n"
+    "add x19, x19, #0x20\n"
+    "movi v31.16b, #0x0\n"
+    "add x22, x22, x28\n"
+    "add x21, x21, x28\n"
+    "add x20, x20, x28\n"
+    "tbz %x[n_channels], #3, 19f\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
+    "tbz %x[n_channels], #2, 17f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 16f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
+    "b 23f\n"
+    "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
+    "b 23f\n"
+    "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 18f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
+    "b 23f\n"
+    "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
+    "b 23f\n"
+    "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 21f\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
+    "tbz %x[n_channels], #1, 20f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
+    "b 23f\n"
+    "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
+    "b 23f\n"
+    "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 22f\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
+    "tbz %x[n_channels], #0, 23f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
+    "b 23f\n"
+    "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 23f\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
+    "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "subs x24, x24, #0x1\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v19.16b, v23.16b, v19.16b\n"
+    "umax v4.16b, v4.16b, v19.16b\n"
+    "bgt 15b\n"
+    "24:"  // Oddments: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 34f\n"
+    "25:"  // Oddments: Single input loop
+    "movi v2.16b, #0x0\n"
+    "ldr x23, [x19], #0x8\n"
+    "add x23, x23, x28\n"
+    "tbz %x[n_channels], #3, 29f\n"
+    "ldr d2, [x23], #0x8\n"
+    "tbz %x[n_channels], #2, 27f\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 26f\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "b 33f\n"
+    "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "b 33f\n"
+    "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 28f\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "b 33f\n"
+    "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "b 33f\n"
+    "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
+    "tbz %x[n_channels], #2, 31f\n"
+    "ldr s2, [x23], #0x4\n"
+    "tbz %x[n_channels], #1, 30f\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "b 33f\n"
+    "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "b 33f\n"
+    "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 32f\n"
+    "ldr h2, [x23], #0x2\n"
+    "tbz %x[n_channels], #0, 33f\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "b 33f\n"
+    "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 33f\n"
+    "ldr b2, [x23], #0x1\n"
+    "33:"  // Oddments: Single input loop: Load: Bit 3: End
+    "umax v4.16b, v4.16b, v2.16b\n"
+    "subs x20, x20, #0x1\n"
+    "bgt 25b\n"
+    "34:"  // Oddments: Single input loop: End
+    "uxtl v17.8h, v4.8b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1r { v5.4s }, [x19]\n"
+    "uxtl2 v16.8h, v4.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "movi v30.4s, #0x0\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "movi v29.4s, #0xff\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "neg v5.4s, v5.4s\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "saddw v23.4s, v5.4s, v17.4h\n"
+    "ld1r { v1.4s }, [x19]\n"
+    "saddw2 v18.4s, v5.4s, v17.8h\n"
+    "saddw v17.4s, v5.4s, v16.4h\n"
+    "saddw2 v16.4s, v5.4s, v16.8h\n"
+    "srshl v23.4s, v23.4s, v3.4s\n"
+    "srshl v18.4s, v18.4s, v3.4s\n"
+    "srshl v17.4s, v17.4s, v3.4s\n"
+    "srshl v16.4s, v16.4s, v3.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
+    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+    "srshl v23.4s, v23.4s, v2.4s\n"
+    "srshl v18.4s, v18.4s, v2.4s\n"
+    "srshl v17.4s, v17.4s, v2.4s\n"
+    "srshl v16.4s, v16.4s, v2.4s\n"
+    "add v23.4s, v23.4s, v1.4s\n"
+    "add v18.4s, v18.4s, v1.4s\n"
+    "add v17.4s, v17.4s, v1.4s\n"
+    "add v16.4s, v16.4s, v1.4s\n"
+    "smax v23.4s, v23.4s, v30.4s\n"
+    "smax v18.4s, v18.4s, v30.4s\n"
+    "smax v17.4s, v17.4s, v30.4s\n"
+    "smin v23.4s, v23.4s, v29.4s\n"
+    "smin v18.4s, v18.4s, v29.4s\n"
+    "smin v17.4s, v17.4s, v29.4s\n"
+    "smax v16.4s, v16.4s, v30.4s\n"
+    "uzp1 v26.16b, v23.16b, v18.16b\n"
+    "smin v16.4s, v16.4s, v29.4s\n"
+    "uzp1 v24.16b, v17.16b, v16.16b\n"
+    "uzp1 v16.16b, v26.16b, v24.16b\n"
+    "tbz %x[n_channels], #3, 38f\n"
+    "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
+    "tbz %x[n_channels], #2, 36f\n"
+    "st1 { v16.s }[2], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 35f\n"
+    "st1 { v16.h }[6], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[14], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[12], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
+    "tbz %x[n_channels], #1, 37f\n"
+    "st1 { v16.h }[4], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[10], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[8], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "38:"  // Oddments: Store: Bit 3: Unset
+    "tbz %x[n_channels], #2, 40f\n"
+    "st1 { v16.s }[0], [%x[outptr]], #0x4\n"
+    "tbz %x[n_channels], #1, 39f\n"
+    "st1 { v16.h }[2], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[6], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[4], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
+    "tbz %x[n_channels], #1, 41f\n"
+    "st1 { v16.h }[0], [%x[outptr]], #0x2\n"
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[2], [%x[outptr]], #0x1\n"
+    "b 42f\n"
+    "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+    "tbz %x[n_channels], #0, 42f\n"
+    "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
+    "42:"  // Oddments: Store: Bit 3: End
+
+    "43:"  // End
+
+    // n_channels/outptr are read-write ("+&r"); the "I"-constrained operands
+    // are compile-time constant byte offsets into Requantize32, so the asm
+    // tracks any change to that struct's layout automatically.
+    : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
+    : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp
new file mode 100644
index 0000000..38c70b2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+namespace arm_conv {
+namespace pooling {
+
+// Pass-through "pooling" kernel: a 1x1 pool window with any stride has a
+// single input cell per output point, so the implementation is a plain copy
+// (see the matching generic.cpp, which uses std::memcpy).
+template <typename T>
+void cpp_nhwc_1x1_stride_any_depthfirst_impl(const uint64_t, const uint64_t, uint64_t n_channels, const T *const *const inptrs, T *outptr);
+
+// Kernel descriptor consumed by the depthfirst pooling driver: exposes the
+// element types, the pooling variant, and the kernel entry point.
+template <typename T>
+struct cpp_nhwc_1x1_stride_any_depthfirst
+{
+  typedef T operand_type;  // input element type
+  typedef T return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t, uint64_t n_channels, const operand_type *const *const inptrs, return_type *outptr);
+
+  // Registered as MAX; for a 1x1 window the reduction is a copy regardless.
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = cpp_nhwc_1x1_stride_any_depthfirst_impl;
+
+  // No CPU-specific specialization is required; the CPUInfo is ignored.
+  cpp_nhwc_1x1_stride_any_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst/generic.cpp
new file mode 100644
index 0000000..f2df723
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst/generic.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+#include <cstring>
+
+namespace arm_conv {
+namespace pooling {
+
+// A 1x1 pooling window has exactly one input cell per output point, so
+// pooling degenerates to copying `n_channels` elements from the first (and
+// only) input pointer. The two leading window-geometry arguments are unused.
+template <typename T>
+void cpp_nhwc_1x1_stride_any_depthfirst_impl(
+  uint64_t,                      // unused
+  uint64_t,                      // unused
+  const uint64_t n_channels,     // number of channel elements to copy
+  const T *const *const inptrs,  // inptrs[0] is the sole input cell
+  T *outptr                      // destination for the copied channels
+)
+{
+  std::memcpy(outptr, inptrs[0], n_channels * sizeof(T));
+}
+
+// Explicit instantiations for the element types used by the pooling driver.
+template void cpp_nhwc_1x1_stride_any_depthfirst_impl(uint64_t, uint64_t, uint64_t, const float *const *, float *);
+#if defined(__ARM_FP16_ARGS)
+template void cpp_nhwc_1x1_stride_any_depthfirst_impl(uint64_t, uint64_t, uint64_t, const __fp16 *const *, __fp16 *);
+#endif  // defined(__ARM_FP16_ARGS)
+template void cpp_nhwc_1x1_stride_any_depthfirst_impl(uint64_t, uint64_t, uint64_t, const int8_t *const *, int8_t *);
+template void cpp_nhwc_1x1_stride_any_depthfirst_impl(uint64_t, uint64_t, uint64_t, const uint8_t *const *, uint8_t *);
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..8c7a497
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst
+{
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  constexpr static unsigned int pool_rows(void) { return 3; }
+  constexpr static unsigned int pool_cols(void) { return 3; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
+
+  sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..7464349
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const __fp16 *const *const inptrs;
+    __fp16 *const *const outptrs;
+    __fp16 rescale_vals[4];
+
+    KernelArgs(
+      unsigned int channels,
+      const __fp16 *const *input_ptrs,
+      __fp16 *const * output_ptrs,
+      bool exclude_padding, unsigned int pad_left, unsigned int pad_top, unsigned int pad_right, unsigned int pad_bottom
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+      for (unsigned int i = 0; i < 2; i++)
+      {
+        const int start_i = 1*i - static_cast<int>(pad_top);
+        const int end_i = std::min<int>(start_i + 3, 4 - pad_top - pad_bottom);
+        const int valid_rows = end_i - std::max<int>(0, start_i);
+
+        for (unsigned int j = 0; j < 2; j++)
+        {
+          const int start_j = 1*j - static_cast<int>(pad_left);
+          const int end_j = std::min<int>(start_j + 3, 4 - pad_left - pad_right);
+          const int valid_cols = end_j - std::max<int>(0, start_j);
+
+          rescale_vals[i*2 + j] = static_cast<__fp16>(1.0f / static_cast<float>(
+            exclude_padding ? valid_rows * valid_cols : 9
+          ));
+        }
+      }
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x4, #0x0\n"
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x5, #0x0\n"
+    "mov x19, #0x4\n"
+    "ldp x6, x7, [x20, #0x0]\n"
+    "whilelt p0.h, XZR, x19\n"
+    "add x8, %x[args], %[offsetof_rescale]\n"
+    "ldp x17, x16, [x20, #0x10]\n"
+    "whilelt p1.h, x4, x3\n"
+    "ldr x15, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x14, x13, [x15, #0x0]\n"
+    "ldp x12, x11, [x15, #0x10]\n"
+    "ldp x10, x9, [x15, #0x20]\n"
+    "ldp x28, x27, [x15, #0x30]\n"
+    "ldp x26, x25, [x15, #0x40]\n"
+    "ldp x24, x23, [x15, #0x50]\n"
+    "ldp x22, x21, [x15, #0x60]\n"
+    "ldp x20, x19, [x15, #0x70]\n"
+    "ld1rqh { z7.h }, p0/Z, [x8]\n"
+    "ld1h { z8.h }, p1/Z, [x9, x4, LSL #1]\n"
+    "ld1h { z6.h }, p1/Z, [x28, x4, LSL #1]\n"
+    "ld1h { z5.h }, p1/Z, [x25, x4, LSL #1]\n"
+    "ld1h { z4.h }, p1/Z, [x24, x4, LSL #1]\n"
+    "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
+    "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
+    "ld1h { z1.h }, p1/Z, [x10, x4, LSL #1]\n"
+    "ld1h { z0.h }, p1/Z, [x26, x4, LSL #1]\n"
+    "ld1h { z31.h }, p1/Z, [x27, x4, LSL #1]\n"
+    "ld1h { z30.h }, p1/Z, [x23, x4, LSL #1]\n"
+    "ld1h { z29.h }, p1/Z, [x21, x4, LSL #1]\n"
+    "ld1h { z28.h }, p1/Z, [x20, x4, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x14, x4, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x11, x4, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x22, x4, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x19, x4, LSL #1]\n"
+    "incw x4\n"
+    "whilelt p1.h, x4, x3\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "fadd z17.h, z8.h, z6.h\n"
+    "ld1h { z8.h }, p1/Z, [x9, x4, LSL #1]\n"
+    "whilelt p0.h, x5, x3\n"
+    "fadd z16.h, z5.h, z4.h\n"
+    "ld1h { z6.h }, p1/Z, [x28, x4, LSL #1]\n"
+    "fadd z18.h, z3.h, z2.h\n"
+    "ld1h { z5.h }, p1/Z, [x25, x4, LSL #1]\n"
+    "fadd z23.h, z1.h, z0.h\n"
+    "ld1h { z4.h }, p1/Z, [x24, x4, LSL #1]\n"
+    "fadd z22.h, z31.h, z30.h\n"
+    "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
+    "fadd z17.h, z17.h, z16.h\n"
+    "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
+    "fadd z16.h, z29.h, z28.h\n"
+    "ld1h { z1.h }, p1/Z, [x10, x4, LSL #1]\n"
+    "fadd z19.h, z27.h, z23.h\n"
+    "ld1h { z0.h }, p1/Z, [x26, x4, LSL #1]\n"
+    "fadd z21.h, z18.h, z17.h\n"
+    "ld1h { z31.h }, p1/Z, [x27, x4, LSL #1]\n"
+    "fadd z20.h, z16.h, z17.h\n"
+    "ld1h { z30.h }, p1/Z, [x23, x4, LSL #1]\n"
+    "fadd z18.h, z26.h, z22.h\n"
+    "ld1h { z29.h }, p1/Z, [x21, x4, LSL #1]\n"
+    "fadd z17.h, z25.h, z23.h\n"
+    "ld1h { z28.h }, p1/Z, [x20, x4, LSL #1]\n"
+    "fadd z16.h, z24.h, z22.h\n"
+    "ld1h { z27.h }, p1/Z, [x14, x4, LSL #1]\n"
+    "fadd z19.h, z19.h, z21.h\n"
+    "ld1h { z26.h }, p1/Z, [x11, x4, LSL #1]\n"
+    "fadd z18.h, z18.h, z21.h\n"
+    "ld1h { z25.h }, p1/Z, [x22, x4, LSL #1]\n"
+    "fadd z17.h, z17.h, z20.h\n"
+    "ld1h { z24.h }, p1/Z, [x19, x4, LSL #1]\n"
+    "incw x4\n"
+    "fadd z16.h, z20.h, z16.h\n"
+    "whilelt p1.h, x4, x3\n"
+    "fmul z19.h, z19.h, z7.h[0]\n"
+    "st1h { z19.h }, p0, [x6, x5, LSL #1]\n"
+    "fmul z18.h, z18.h, z7.h[1]\n"
+    "fmul z17.h, z17.h, z7.h[2]\n"
+    "st1h { z18.h }, p0, [x7, x5, LSL #1]\n"
+    "fmul z16.h, z16.h, z7.h[3]\n"
+    "st1h { z17.h }, p0, [x17, x5, LSL #1]\n"
+    "st1h { z16.h }, p0, [x16, x5, LSL #1]\n"
+    "incw x5\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "fadd z17.h, z8.h, z6.h\n"
+    "whilelt p0.h, x5, x3\n"
+    "fadd z16.h, z5.h, z4.h\n"
+    "fadd z18.h, z3.h, z2.h\n"
+    "fadd z23.h, z1.h, z0.h\n"
+    "fadd z17.h, z17.h, z16.h\n"
+    "fadd z22.h, z31.h, z30.h\n"
+    "fadd z16.h, z29.h, z28.h\n"
+    "fadd z21.h, z18.h, z17.h\n"
+    "fadd z19.h, z27.h, z23.h\n"
+    "fadd z20.h, z16.h, z17.h\n"
+    "fadd z18.h, z26.h, z22.h\n"
+    "fadd z17.h, z25.h, z23.h\n"
+    "fadd z16.h, z24.h, z22.h\n"
+    "fadd z19.h, z19.h, z21.h\n"
+    "fadd z18.h, z18.h, z21.h\n"
+    "fadd z17.h, z17.h, z20.h\n"
+    "fadd z16.h, z20.h, z16.h\n"
+    "fmul z19.h, z19.h, z7.h[0]\n"
+    "st1h { z19.h }, p0, [x6, x5, LSL #1]\n"
+    "fmul z18.h, z18.h, z7.h[1]\n"
+    "fmul z17.h, z17.h, z7.h[2]\n"
+    "st1h { z18.h }, p0, [x7, x5, LSL #1]\n"
+    "fmul z16.h, z16.h, z7.h[3]\n"
+    "st1h { z17.h }, p0, [x17, x5, LSL #1]\n"
+    "st1h { z16.h }, p0, [x16, x5, LSL #1]\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
+    : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..33ee25c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp16_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+struct sve_fp16_nhwc_avg_generic_depthfirst
+{
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = sve_fp16_nhwc_avg_generic_depthfirst_impl;
+
+  sve_fp16_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..20293c0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sve_fp16_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *outptr
+)
+{
+  const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
+
+  __asm__ __volatile__(
+    "ptrue p0.b\n"
+    "ld1rh { z7.h }, p0/Z, [%x[rescale_ptr]]\n"
+    "mov x28, #0x0\n"
+    "cnth x27\n"
+    "cnth x26, ALL, MUL #2\n"
+    "cnth x25, ALL, MUL #3\n"
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "whilelt p2.h, x27, %x[n_channels]\n"
+    "whilelt p1.h, x26, %x[n_channels]\n"
+    "whilelt p0.h, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z6.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z5.b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x0\n"
+    "mov z3.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fadd z23.h, z2.h, z1.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z19.h, z0.h, z31.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd z22.h, z30.h, z22.h\n"
+    "add x19, x19, #0x20\n"
+    "fadd z18.h, z29.h, z28.h\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fadd z21.h, z27.h, z21.h\n"
+    "fadd z17.h, z26.h, z17.h\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "fadd z20.h, z25.h, z20.h\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "fadd z16.h, z24.h, z16.h\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "fadd z18.h, z22.h, z18.h\n"
+    "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "fadd z17.h, z21.h, z17.h\n"
+    "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "fadd z16.h, z20.h, z16.h\n"
+    "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "fadd z5.h, z5.h, z18.h\n"
+    "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+    "fadd z4.h, z4.h, z17.h\n"
+    "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+    "fadd z3.h, z3.h, z16.h\n"
+    "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fadd z23.h, z2.h, z1.h\n"
+    "fadd z19.h, z0.h, z31.h\n"
+    "fadd z22.h, z30.h, z22.h\n"
+    "fadd z18.h, z29.h, z28.h\n"
+    "fadd z21.h, z27.h, z21.h\n"
+    "fadd z17.h, z26.h, z17.h\n"
+    "fadd z20.h, z25.h, z20.h\n"
+    "fadd z16.h, z24.h, z16.h\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "fadd z18.h, z22.h, z18.h\n"
+    "fadd z17.h, z21.h, z17.h\n"
+    "fadd z16.h, z20.h, z16.h\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "fadd z5.h, z5.h, z18.h\n"
+    "fadd z4.h, z4.h, z17.h\n"
+    "fadd z3.h, z3.h, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fadd z6.h, z6.h, z2.h\n"
+    "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "fadd z5.h, z5.h, z30.h\n"
+    "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "fadd z4.h, z4.h, z27.h\n"
+    "fadd z3.h, z3.h, z25.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "fmul z6.h, z6.h, z7.h\n"
+    "st1h { z6.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "fmul z5.h, z5.h, z7.h\n"
+    "inch x28, ALL, MUL #4\n"
+    "fmul z4.h, z4.h, z7.h\n"
+    "st1h { z5.h }, p2, [%x[outptr], x27, LSL #1]\n"
+    "fmul z3.h, z3.h, z7.h\n"
+    "inch x27, ALL, MUL #4\n"
+    "st1h { z4.h }, p1, [%x[outptr], x26, LSL #1]\n"
+    "inch x26, ALL, MUL #4\n"
+    "st1h { z3.h }, p0, [%x[outptr], x25, LSL #1]\n"
+    "inch x25, ALL, MUL #4\n"
+    "whilelt p0.h, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z6.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fadd z23.h, z2.h, z1.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z19.h, z0.h, z31.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fadd z23.h, z2.h, z1.h\n"
+    "fadd z19.h, z0.h, z31.h\n"
+    "fadd z19.h, z23.h, z19.h\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fadd z6.h, z6.h, z2.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "fmul z6.h, z6.h, z7.h\n"
+    "st1h { z6.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "inch x28\n"
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..5fb297e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..0f377d9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const __fp16 *const *const inptrs;
+    __fp16 *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const __fp16 *const *input_ptrs,
+      __fp16 *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "mov x12, #0x0\n"
+    "ldp x11, x10, [x19, #0x0]\n"
+    "whilelt p1.h, x13, x14\n"
+    "ldp x9, x28, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1h { z31.h }, p1/Z, [x26, x13, LSL #1]\n"
+    "ld1h { z30.h }, p1/Z, [x23, x13, LSL #1]\n"
+    "ld1h { z29.h }, p1/Z, [x20, x13, LSL #1]\n"
+    "ld1h { z28.h }, p1/Z, [x24, x13, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x27, x13, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x22, x13, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x25, x13, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x21, x13, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x19, x13, LSL #1]\n"
+    "incw x13\n"
+    "whilelt p1.h, x13, x14\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z30.h\n"
+    "ld1h { z31.h }, p1/Z, [x26, x13, LSL #1]\n"
+    "whilelt p0.h, x12, x14\n"
+    "movprfx z21, z30\n fmax z21.h, p2/M, z21.h, z29.h\n"
+    "ld1h { z30.h }, p1/Z, [x23, x13, LSL #1]\n"
+    "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z27.h\n"
+    "ld1h { z29.h }, p1/Z, [x20, x13, LSL #1]\n"
+    "movprfx z17, z26\n fmax z17.h, p2/M, z17.h, z25.h\n"
+    "ld1h { z27.h }, p1/Z, [x27, x13, LSL #1]\n"
+    "movprfx z16, z24\n fmax z16.h, p2/M, z16.h, z28.h\n"
+    "ld1h { z28.h }, p1/Z, [x24, x13, LSL #1]\n"
+    "movprfx z20, z26\n fmax z20.h, p2/M, z20.h, z23.h\n"
+    "ld1h { z26.h }, p1/Z, [x22, x13, LSL #1]\n"
+    "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
+    "ld1h { z25.h }, p1/Z, [x25, x13, LSL #1]\n"
+    "movprfx z18, z17\n fmax z18.h, p2/M, z18.h, z22.h\n"
+    "ld1h { z24.h }, p1/Z, [x21, x13, LSL #1]\n"
+    "movprfx z17, z16\n fmax z17.h, p2/M, z17.h, z21.h\n"
+    "ld1h { z23.h }, p1/Z, [x19, x13, LSL #1]\n"
+    "incw x13\n"
+    "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
+    "st1h { z19.h }, p0, [x11, x12, LSL #1]\n"
+    "whilelt p1.h, x13, x14\n"
+    "st1h { z18.h }, p0, [x10, x12, LSL #1]\n"
+    "st1h { z17.h }, p0, [x9, x12, LSL #1]\n"
+    "st1h { z16.h }, p0, [x28, x12, LSL #1]\n"
+    "incw x12\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z30.h\n"
+    "whilelt p0.h, x12, x14\n"
+    "movprfx z21, z30\n fmax z21.h, p2/M, z21.h, z29.h\n"
+    "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z27.h\n"
+    "movprfx z17, z26\n fmax z17.h, p2/M, z17.h, z25.h\n"
+    "movprfx z16, z24\n fmax z16.h, p2/M, z16.h, z28.h\n"
+    "movprfx z20, z26\n fmax z20.h, p2/M, z20.h, z23.h\n"
+    "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
+    "st1h { z19.h }, p0, [x11, x12, LSL #1]\n"
+    "movprfx z18, z17\n fmax z18.h, p2/M, z18.h, z22.h\n"
+    "movprfx z17, z16\n fmax z17.h, p2/M, z17.h, z21.h\n"
+    "st1h { z18.h }, p0, [x10, x12, LSL #1]\n"
+    "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
+    "st1h { z17.h }, p0, [x9, x12, LSL #1]\n"
+    "st1h { z16.h }, p0, [x28, x12, LSL #1]\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..92cccd5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp16_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+struct sve_fp16_nhwc_max_generic_depthfirst
+{
+  typedef __fp16 operand_type;
+  typedef __fp16 return_type;
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = sve_fp16_nhwc_max_generic_depthfirst_impl;
+
+  sve_fp16_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..bbd3213
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sve_fp16_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const __fp16 *const *const inptrs,
+  __fp16 *outptr
+)
+{
+  __asm__ __volatile__(
+    "ptrue p4.b\n"
+    "mov x28, #0x0\n"
+    "cnth x27\n"
+    "cnth x26, ALL, MUL #2\n"
+    "cnth x25, ALL, MUL #3\n"
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "whilelt p2.h, x27, %x[n_channels]\n"
+    "whilelt p1.h, x26, %x[n_channels]\n"
+    "whilelt p0.h, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z7.h, #0xfc00\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z6.h, #0xfc00\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.h, #0xfc00\n"
+    "mov z4.h, #0xfc00\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z30.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z22.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z29.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z28.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z21.h }, p1/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z16.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z25.h }, p0/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z20.h }, p0/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z24.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z31\n fmax z18.h, p4/M, z18.h, z30.h\n"
+    "add x19, x19, #0x20\n"
+    "fmax z22.h, p4/M, z22.h, z29.h\n"
+    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "movprfx z17, z28\n fmax z17.h, p4/M, z17.h, z27.h\n"
+    "fmax z21.h, p4/M, z21.h, z26.h\n"
+    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "fmax z16.h, p4/M, z16.h, z25.h\n"
+    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "fmax z20.h, p4/M, z20.h, z24.h\n"
+    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "fmax z19.h, p4/M, z19.h, z23.h\n"
+    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "fmax z18.h, p4/M, z18.h, z22.h\n"
+    "ld1h { z30.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "fmax z17.h, p4/M, z17.h, z21.h\n"
+    "ld1h { z22.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "fmax z16.h, p4/M, z16.h, z20.h\n"
+    "ld1h { z29.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "fmax z7.h, p4/M, z7.h, z19.h\n"
+    "ld1h { z28.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "fmax z6.h, p4/M, z6.h, z18.h\n"
+    "ld1h { z27.h }, p1/Z, [x22, x26, LSL #1]\n"
+    "fmax z5.h, p4/M, z5.h, z17.h\n"
+    "ld1h { z21.h }, p1/Z, [x21, x26, LSL #1]\n"
+    "fmax z4.h, p4/M, z4.h, z16.h\n"
+    "ld1h { z26.h }, p1/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z16.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z25.h }, p0/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z20.h }, p0/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z24.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
+    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
+    "movprfx z18, z31\n fmax z18.h, p4/M, z18.h, z30.h\n"
+    "fmax z22.h, p4/M, z22.h, z29.h\n"
+    "movprfx z17, z28\n fmax z17.h, p4/M, z17.h, z27.h\n"
+    "fmax z21.h, p4/M, z21.h, z26.h\n"
+    "fmax z16.h, p4/M, z16.h, z25.h\n"
+    "fmax z20.h, p4/M, z20.h, z24.h\n"
+    "fmax z19.h, p4/M, z19.h, z23.h\n"
+    "fmax z18.h, p4/M, z18.h, z22.h\n"
+    "fmax z17.h, p4/M, z17.h, z21.h\n"
+    "fmax z16.h, p4/M, z16.h, z20.h\n"
+    "fmax z7.h, p4/M, z7.h, z19.h\n"
+    "fmax z6.h, p4/M, z6.h, z18.h\n"
+    "fmax z5.h, p4/M, z5.h, z17.h\n"
+    "fmax z4.h, p4/M, z4.h, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fmax z7.h, p4/M, z7.h, z3.h\n"
+    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z28.h }, p1/Z, [x23, x26, LSL #1]\n"
+    "fmax z6.h, p4/M, z6.h, z31.h\n"
+    "ld1h { z16.h }, p0/Z, [x23, x25, LSL #1]\n"
+    "fmax z5.h, p4/M, z5.h, z28.h\n"
+    "fmax z4.h, p4/M, z4.h, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "inch x28, ALL, MUL #4\n"
+    "st1h { z6.h }, p2, [%x[outptr], x27, LSL #1]\n"
+    "inch x27, ALL, MUL #4\n"
+    "st1h { z5.h }, p1, [%x[outptr], x26, LSL #1]\n"
+    "inch x26, ALL, MUL #4\n"
+    "st1h { z4.h }, p0, [%x[outptr], x25, LSL #1]\n"
+    "inch x25, ALL, MUL #4\n"
+    "whilelt p0.h, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z7.h, #0xfc00\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax z19.h, p4/M, z19.h, z23.h\n"
+    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fmax z7.h, p4/M, z7.h, z19.h\n"
+    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
+    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
+    "fmax z19.h, p4/M, z19.h, z23.h\n"
+    "fmax z7.h, p4/M, z7.h, z19.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fmax z7.h, p4/M, z7.h, z3.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "inch x28\n"
+    "whilelt p3.h, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(__ARM_FP16_ARGS)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..9cbdb8a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+struct sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst
+{
+  typedef float operand_type;
+  typedef float return_type;
+
+  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  constexpr static unsigned int pool_rows(void) { return 3; }
+  constexpr static unsigned int pool_cols(void) { return 3; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
+
+  sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..03f1736
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const float *const *const inptrs,
+  float *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const float *const *const inptrs;
+    float *const *const outptrs;
+    float rescale_vals[4];
+
+    KernelArgs(
+      unsigned int channels,
+      const float *const *input_ptrs,
+      float *const * output_ptrs,
+      bool exclude_padding, unsigned int pad_left, unsigned int pad_top, unsigned int pad_right, unsigned int pad_bottom
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+      for (unsigned int i = 0; i < 2; i++)
+      {
+        const int start_i = 1*i - static_cast<int>(pad_top);
+        const int end_i = std::min<int>(start_i + 3, 4 - pad_top - pad_bottom);
+        const int valid_rows = end_i - std::max<int>(0, start_i);
+
+        for (unsigned int j = 0; j < 2; j++)
+        {
+          const int start_j = 1*j - static_cast<int>(pad_left);
+          const int end_j = std::min<int>(start_j + 3, 4 - pad_left - pad_right);
+          const int valid_cols = end_j - std::max<int>(0, start_j);
+
+          rescale_vals[i*2 + j] = static_cast<float>(1.0f / static_cast<float>(
+            exclude_padding ? valid_rows * valid_cols : 9
+          ));
+        }
+      }
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
+    "mov x4, #0x0\n"
+    "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x5, #0x0\n"
+    "mov x19, #0x4\n"
+    "ldp x6, x7, [x20, #0x0]\n"
+    "whilelt p0.s, XZR, x19\n"
+    "add x8, %x[args], %[offsetof_rescale]\n"
+    "ldp x17, x16, [x20, #0x10]\n"
+    "whilelt p1.s, x4, x3\n"
+    "ldr x15, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x14, x13, [x15, #0x0]\n"
+    "ldp x12, x11, [x15, #0x10]\n"
+    "ldp x10, x9, [x15, #0x20]\n"
+    "ldp x28, x27, [x15, #0x30]\n"
+    "ldp x26, x25, [x15, #0x40]\n"
+    "ldp x24, x23, [x15, #0x50]\n"
+    "ldp x22, x21, [x15, #0x60]\n"
+    "ldp x20, x19, [x15, #0x70]\n"
+    "ld1rqw { z7.s }, p0/Z, [x8]\n"
+    "ld1w { z8.s }, p1/Z, [x9, x4, LSL #2]\n"
+    "ld1w { z6.s }, p1/Z, [x28, x4, LSL #2]\n"
+    "ld1w { z5.s }, p1/Z, [x25, x4, LSL #2]\n"
+    "ld1w { z4.s }, p1/Z, [x24, x4, LSL #2]\n"
+    "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
+    "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
+    "ld1w { z1.s }, p1/Z, [x10, x4, LSL #2]\n"
+    "ld1w { z0.s }, p1/Z, [x26, x4, LSL #2]\n"
+    "ld1w { z31.s }, p1/Z, [x27, x4, LSL #2]\n"
+    "ld1w { z30.s }, p1/Z, [x23, x4, LSL #2]\n"
+    "ld1w { z29.s }, p1/Z, [x21, x4, LSL #2]\n"
+    "ld1w { z28.s }, p1/Z, [x20, x4, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x14, x4, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x11, x4, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x22, x4, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x19, x4, LSL #2]\n"
+    "incw x4\n"
+    "whilelt p1.s, x4, x3\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "fadd z17.s, z8.s, z6.s\n"
+    "ld1w { z8.s }, p1/Z, [x9, x4, LSL #2]\n"
+    "whilelt p0.s, x5, x3\n"
+    "fadd z16.s, z5.s, z4.s\n"
+    "ld1w { z6.s }, p1/Z, [x28, x4, LSL #2]\n"
+    "fadd z18.s, z3.s, z2.s\n"
+    "ld1w { z5.s }, p1/Z, [x25, x4, LSL #2]\n"
+    "fadd z23.s, z1.s, z0.s\n"
+    "ld1w { z4.s }, p1/Z, [x24, x4, LSL #2]\n"
+    "fadd z22.s, z31.s, z30.s\n"
+    "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
+    "fadd z17.s, z17.s, z16.s\n"
+    "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
+    "fadd z16.s, z29.s, z28.s\n"
+    "ld1w { z1.s }, p1/Z, [x10, x4, LSL #2]\n"
+    "fadd z19.s, z27.s, z23.s\n"
+    "ld1w { z0.s }, p1/Z, [x26, x4, LSL #2]\n"
+    "fadd z21.s, z18.s, z17.s\n"
+    "ld1w { z31.s }, p1/Z, [x27, x4, LSL #2]\n"
+    "fadd z20.s, z16.s, z17.s\n"
+    "ld1w { z30.s }, p1/Z, [x23, x4, LSL #2]\n"
+    "fadd z18.s, z26.s, z22.s\n"
+    "ld1w { z29.s }, p1/Z, [x21, x4, LSL #2]\n"
+    "fadd z17.s, z25.s, z23.s\n"
+    "ld1w { z28.s }, p1/Z, [x20, x4, LSL #2]\n"
+    "fadd z16.s, z24.s, z22.s\n"
+    "ld1w { z27.s }, p1/Z, [x14, x4, LSL #2]\n"
+    "fadd z19.s, z19.s, z21.s\n"
+    "ld1w { z26.s }, p1/Z, [x11, x4, LSL #2]\n"
+    "fadd z18.s, z18.s, z21.s\n"
+    "ld1w { z25.s }, p1/Z, [x22, x4, LSL #2]\n"
+    "fadd z17.s, z17.s, z20.s\n"
+    "ld1w { z24.s }, p1/Z, [x19, x4, LSL #2]\n"
+    "incw x4\n"
+    "fadd z16.s, z20.s, z16.s\n"
+    "whilelt p1.s, x4, x3\n"
+    "fmul z19.s, z19.s, z7.s[0]\n"
+    "st1w { z19.s }, p0, [x6, x5, LSL #2]\n"
+    "fmul z18.s, z18.s, z7.s[1]\n"
+    "fmul z17.s, z17.s, z7.s[2]\n"
+    "st1w { z18.s }, p0, [x7, x5, LSL #2]\n"
+    "fmul z16.s, z16.s, z7.s[3]\n"
+    "st1w { z17.s }, p0, [x17, x5, LSL #2]\n"
+    "st1w { z16.s }, p0, [x16, x5, LSL #2]\n"
+    "incw x5\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "fadd z17.s, z8.s, z6.s\n"
+    "whilelt p0.s, x5, x3\n"
+    "fadd z16.s, z5.s, z4.s\n"
+    "fadd z18.s, z3.s, z2.s\n"
+    "fadd z23.s, z1.s, z0.s\n"
+    "fadd z17.s, z17.s, z16.s\n"
+    "fadd z22.s, z31.s, z30.s\n"
+    "fadd z16.s, z29.s, z28.s\n"
+    "fadd z21.s, z18.s, z17.s\n"
+    "fadd z19.s, z27.s, z23.s\n"
+    "fadd z20.s, z16.s, z17.s\n"
+    "fadd z18.s, z26.s, z22.s\n"
+    "fadd z17.s, z25.s, z23.s\n"
+    "fadd z16.s, z24.s, z22.s\n"
+    "fadd z19.s, z19.s, z21.s\n"
+    "fadd z18.s, z18.s, z21.s\n"
+    "fadd z17.s, z17.s, z20.s\n"
+    "fadd z16.s, z20.s, z16.s\n"
+    "fmul z19.s, z19.s, z7.s[0]\n"
+    "st1w { z19.s }, p0, [x6, x5, LSL #2]\n"
+    "fmul z18.s, z18.s, z7.s[1]\n"
+    "fmul z17.s, z17.s, z7.s[2]\n"
+    "st1w { z18.s }, p0, [x7, x5, LSL #2]\n"
+    "fmul z16.s, z16.s, z7.s[3]\n"
+    "st1w { z17.s }, p0, [x17, x5, LSL #2]\n"
+    "st1w { z16.s }, p0, [x16, x5, LSL #2]\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
+    : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..de315d2
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp32_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+struct sve_fp32_nhwc_avg_generic_depthfirst
+{
+  typedef float operand_type;
+  typedef float return_type;
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = sve_fp32_nhwc_avg_generic_depthfirst_impl;
+
+  sve_fp32_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..218c1f9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sve_fp32_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const float *const *const inptrs,
+  float *outptr
+)
+{
+  const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
+
+  __asm__ __volatile__(
+    "ptrue p0.b\n"
+    "ld1rw { z7.s }, p0/Z, [%x[rescale_ptr]]\n"
+    "mov x28, #0x0\n"
+    "cntw x27\n"
+    "cntw x26, ALL, MUL #2\n"
+    "cntw x25, ALL, MUL #3\n"
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "whilelt p2.s, x27, %x[n_channels]\n"
+    "whilelt p1.s, x26, %x[n_channels]\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z6.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z5.b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z4.b, #0x0\n"
+    "mov z3.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "fadd z23.s, z2.s, z1.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z19.s, z0.s, z31.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd z22.s, z30.s, z22.s\n"
+    "add x19, x19, #0x20\n"
+    "fadd z18.s, z29.s, z28.s\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fadd z21.s, z27.s, z21.s\n"
+    "fadd z17.s, z26.s, z17.s\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "fadd z20.s, z25.s, z20.s\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "fadd z16.s, z24.s, z16.s\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "fadd z18.s, z22.s, z18.s\n"
+    "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "fadd z17.s, z21.s, z17.s\n"
+    "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "fadd z16.s, z20.s, z16.s\n"
+    "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "fadd z5.s, z5.s, z18.s\n"
+    "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+    "fadd z4.s, z4.s, z17.s\n"
+    "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+    "fadd z3.s, z3.s, z16.s\n"
+    "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "fadd z23.s, z2.s, z1.s\n"
+    "fadd z19.s, z0.s, z31.s\n"
+    "fadd z22.s, z30.s, z22.s\n"
+    "fadd z18.s, z29.s, z28.s\n"
+    "fadd z21.s, z27.s, z21.s\n"
+    "fadd z17.s, z26.s, z17.s\n"
+    "fadd z20.s, z25.s, z20.s\n"
+    "fadd z16.s, z24.s, z16.s\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "fadd z18.s, z22.s, z18.s\n"
+    "fadd z17.s, z21.s, z17.s\n"
+    "fadd z16.s, z20.s, z16.s\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "fadd z5.s, z5.s, z18.s\n"
+    "fadd z4.s, z4.s, z17.s\n"
+    "fadd z3.s, z3.s, z16.s\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fadd z6.s, z6.s, z2.s\n"
+    "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "fadd z5.s, z5.s, z30.s\n"
+    "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "fadd z4.s, z4.s, z27.s\n"
+    "fadd z3.s, z3.s, z25.s\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "fmul z6.s, z6.s, z7.s\n"
+    "st1w { z6.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "fmul z5.s, z5.s, z7.s\n"
+    "incw x28, ALL, MUL #4\n"
+    "fmul z4.s, z4.s, z7.s\n"
+    "st1w { z5.s }, p2, [%x[outptr], x27, LSL #2]\n"
+    "fmul z3.s, z3.s, z7.s\n"
+    "incw x27, ALL, MUL #4\n"
+    "st1w { z4.s }, p1, [%x[outptr], x26, LSL #2]\n"
+    "incw x26, ALL, MUL #4\n"
+    "st1w { z3.s }, p0, [%x[outptr], x25, LSL #2]\n"
+    "incw x25, ALL, MUL #4\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z6.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "fadd z23.s, z2.s, z1.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z19.s, z0.s, z31.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "fadd z23.s, z2.s, z1.s\n"
+    "fadd z19.s, z0.s, z31.s\n"
+    "fadd z19.s, z23.s, z19.s\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fadd z6.s, z6.s, z2.s\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "fmul z6.s, z6.s, z7.s\n"
+    "st1w { z6.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "incw x28\n"
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..086f49e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);  // args: n_channels, inptrs, outptrs, exclude_padding, pad_left, pad_top, pad_right, pad_bottom (see generic.cpp)
+
+struct sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst  // Kernel descriptor: compile-time pooling geometry plus the function pointer to the asm implementation.
+{
+  typedef float operand_type;  // input element type
+  typedef float return_type;   // output element type
+
+  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  constexpr static unsigned int pool_rows(void) { return 2; }  // 2x2 pooling window
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  constexpr static unsigned int stride_rows(void) { return 1; }  // unit stride in both dimensions
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  constexpr static unsigned int out_rows(void) { return 2; }  // each call produces a 2x2 output tile
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}  // CPUInfo is unused; the parameter keeps construction uniform across kernels
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..279c690
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(  // SVE fp32 NHWC max pool: reads nine input pointers (the 3x3 patch implied by a 2x2 window at stride 1) and writes four output pointers (the 2x2 tile); each output is the channel-wise max of its 2x2 window, one vector of channels per iteration.
+  const unsigned int n_channels,
+  const float *const *const inptrs,
+  float *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const float *const *const inptrs;
+    float *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const float *const *input_ptrs,
+      float *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int  // exclude_padding and the pad_* values are accepted but discarded
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    "ldr x14, [%x[args], %[offsetof_n_channels]]\n"  // x14 = n_channels
+    "ptrue p2.b\n"  // p2: all-true predicate governing the fmax ops
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"  // x13: channel index used for loads
+    "mov x12, #0x0\n"  // x12: channel index used for stores (one vector behind x13)
+    "ldp x11, x10, [x19, #0x0]\n"  // x11/x10/x9/x28: the four output pointers
+    "whilelt p1.s, x13, x14\n"
+    "ldp x9, x28, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x27, x26, [x19, #0x0]\n"  // x27..x20 plus x19: the nine input pointers
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1w { z31.s }, p1/Z, [x26, x13, LSL #2]\n"  // preload one vector of channels from every input
+    "ld1w { z30.s }, p1/Z, [x23, x13, LSL #2]\n"
+    "ld1w { z29.s }, p1/Z, [x20, x13, LSL #2]\n"
+    "ld1w { z28.s }, p1/Z, [x24, x13, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x27, x13, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x22, x13, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x25, x13, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x21, x13, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x19, x13, LSL #2]\n"
+    "incw x13\n"
+    "whilelt p1.s, x13, x14\n"  // more channel vectors after this one?
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z30.s\n"  // pairwise maxima, interleaved with preloads for the next iteration
+    "ld1w { z31.s }, p1/Z, [x26, x13, LSL #2]\n"
+    "whilelt p0.s, x12, x14\n"  // p0 predicates this iteration's stores
+    "movprfx z21, z30\n fmax z21.s, p2/M, z21.s, z29.s\n"
+    "ld1w { z30.s }, p1/Z, [x23, x13, LSL #2]\n"
+    "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z27.s\n"
+    "ld1w { z29.s }, p1/Z, [x20, x13, LSL #2]\n"
+    "movprfx z17, z26\n fmax z17.s, p2/M, z17.s, z25.s\n"
+    "ld1w { z27.s }, p1/Z, [x27, x13, LSL #2]\n"
+    "movprfx z16, z24\n fmax z16.s, p2/M, z16.s, z28.s\n"
+    "ld1w { z28.s }, p1/Z, [x24, x13, LSL #2]\n"
+    "movprfx z20, z26\n fmax z20.s, p2/M, z20.s, z23.s\n"
+    "ld1w { z26.s }, p1/Z, [x22, x13, LSL #2]\n"
+    "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"  // z19/z18/z17/z16: the four per-output window maxima
+    "ld1w { z25.s }, p1/Z, [x25, x13, LSL #2]\n"
+    "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z17.s\n"
+    "ld1w { z24.s }, p1/Z, [x21, x13, LSL #2]\n"
+    "movprfx z17, z21\n fmax z17.s, p2/M, z17.s, z16.s\n"
+    "ld1w { z23.s }, p1/Z, [x19, x13, LSL #2]\n"
+    "incw x13\n"
+    "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
+    "st1w { z19.s }, p0, [x11, x12, LSL #2]\n"  // store the four results for this vector of channels
+    "whilelt p1.s, x13, x14\n"
+    "st1w { z18.s }, p0, [x10, x12, LSL #2]\n"
+    "st1w { z17.s }, p0, [x9, x12, LSL #2]\n"
+    "st1w { z16.s }, p0, [x28, x12, LSL #2]\n"
+    "incw x12\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z30.s\n"  // same reduction for the final vector; no further loads needed
+    "whilelt p0.s, x12, x14\n"
+    "movprfx z21, z30\n fmax z21.s, p2/M, z21.s, z29.s\n"
+    "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z27.s\n"
+    "movprfx z17, z26\n fmax z17.s, p2/M, z17.s, z25.s\n"
+    "movprfx z16, z24\n fmax z16.s, p2/M, z16.s, z28.s\n"
+    "movprfx z20, z26\n fmax z20.s, p2/M, z20.s, z23.s\n"
+    "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
+    "st1w { z19.s }, p0, [x11, x12, LSL #2]\n"
+    "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z17.s\n"
+    "movprfx z17, z21\n fmax z17.s, p2/M, z17.s, z16.s\n"
+    "st1w { z18.s }, p0, [x10, x12, LSL #2]\n"
+    "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
+    "st1w { z17.s }, p0, [x9, x12, LSL #2]\n"
+    "st1w { z16.s }, p0, [x28, x12, LSL #2]\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..ba5138d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_fp32_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);  // first argument is the window cell count — unnamed/unused in the implementation (max pooling needs no rescale)
+
+struct sve_fp32_nhwc_max_generic_depthfirst  // Descriptor for the generic (any window size) fp32 NHWC max-pool kernel.
+{
+  typedef float operand_type;  // input element type
+  typedef float return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = sve_fp32_nhwc_max_generic_depthfirst_impl;
+
+  sve_fp32_nhwc_max_generic_depthfirst(const CPUInfo *) {}  // CPUInfo is unused; the parameter keeps construction uniform across kernels
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..775595f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sve_fp32_nhwc_max_generic_depthfirst_impl(  // Generic SVE fp32 NHWC max pool: folds n_valid_cells input rows (inptrs) into one output row, taking the channel-wise maximum.
+  const uint64_t,  // window cell count: not needed for max pooling
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const float *const *const inptrs,
+  float *outptr
+)
+{
+  __asm__ __volatile__(
+    "ptrue p4.b\n"  // p4: all-true predicate governing the fmax ops
+    "mov x28, #0x0\n"  // x28/x27/x26/x25: element offsets of four consecutive channel vectors
+    "cntw x27\n"
+    "cntw x26, ALL, MUL #2\n"
+    "cntw x25, ALL, MUL #3\n"
+    "whilelt p3.s, x28, %x[n_channels]\n"  // p3..p0 predicate each of the four vectors
+    "whilelt p2.s, x27, %x[n_channels]\n"
+    "whilelt p1.s, x26, %x[n_channels]\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "b.none 7f\n"  // fourth vector empty -> fall through to the one-vector-at-a-time path at 7
+    "1:"  // 4-vectors of channels
+    "mov z7.s, #0xff800000\n"  // accumulators z7..z4 start at the fp32 -infinity bit pattern
+    "mov x19, %x[inptrs]\n"
+    "mov z6.s, #0xff800000\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"  // x24: number of 4-cell groups
+    "mov z5.s, #0xff800000\n"
+    "mov z4.s, #0xff800000\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"  // load the first group of four input-cell pointers
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"  // preload 4 cells x 4 channel vectors
+    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z30.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z22.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z29.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z28.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z21.s }, p1/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z16.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z25.s }, p0/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z20.s }, p0/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z24.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"  // reduce the 4 cells pairwise, then fold into z7..z4; preloads for the next group are interleaved
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z31\n fmax z18.s, p4/M, z18.s, z30.s\n"
+    "add x19, x19, #0x20\n"
+    "fmax z22.s, p4/M, z22.s, z29.s\n"
+    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "movprfx z17, z28\n fmax z17.s, p4/M, z17.s, z27.s\n"
+    "fmax z21.s, p4/M, z21.s, z26.s\n"
+    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "fmax z16.s, p4/M, z16.s, z25.s\n"
+    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "fmax z20.s, p4/M, z20.s, z24.s\n"
+    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "fmax z19.s, p4/M, z19.s, z23.s\n"
+    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "fmax z18.s, p4/M, z18.s, z22.s\n"
+    "ld1w { z30.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "fmax z17.s, p4/M, z17.s, z21.s\n"
+    "ld1w { z22.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "fmax z16.s, p4/M, z16.s, z20.s\n"
+    "ld1w { z29.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fmax z7.s, p4/M, z7.s, z19.s\n"
+    "ld1w { z28.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "fmax z6.s, p4/M, z6.s, z18.s\n"
+    "ld1w { z27.s }, p1/Z, [x22, x26, LSL #2]\n"
+    "fmax z5.s, p4/M, z5.s, z17.s\n"
+    "ld1w { z21.s }, p1/Z, [x21, x26, LSL #2]\n"
+    "fmax z4.s, p4/M, z4.s, z16.s\n"
+    "ld1w { z26.s }, p1/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z16.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z25.s }, p0/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z20.s }, p0/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z24.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"  // same reduction for the last preloaded group; no further loads
+    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
+    "movprfx z18, z31\n fmax z18.s, p4/M, z18.s, z30.s\n"
+    "fmax z22.s, p4/M, z22.s, z29.s\n"
+    "movprfx z17, z28\n fmax z17.s, p4/M, z17.s, z27.s\n"
+    "fmax z21.s, p4/M, z21.s, z26.s\n"
+    "fmax z16.s, p4/M, z16.s, z25.s\n"
+    "fmax z20.s, p4/M, z20.s, z24.s\n"
+    "fmax z19.s, p4/M, z19.s, z23.s\n"
+    "fmax z18.s, p4/M, z18.s, z22.s\n"
+    "fmax z17.s, p4/M, z17.s, z21.s\n"
+    "fmax z16.s, p4/M, z16.s, z20.s\n"
+    "fmax z7.s, p4/M, z7.s, z19.s\n"
+    "fmax z6.s, p4/M, z6.s, z18.s\n"
+    "fmax z5.s, p4/M, z5.s, z17.s\n"
+    "fmax z4.s, p4/M, z4.s, z16.s\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"  // x20: leftover cells (n_valid_cells % 4)
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"  // fold one remaining cell into the accumulators
+    "subs x20, x20, #0x1\n"
+    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fmax z7.s, p4/M, z7.s, z3.s\n"
+    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z28.s }, p1/Z, [x23, x26, LSL #2]\n"
+    "fmax z6.s, p4/M, z6.s, z31.s\n"
+    "ld1w { z16.s }, p0/Z, [x23, x25, LSL #2]\n"
+    "fmax z5.s, p4/M, z5.s, z28.s\n"
+    "fmax z4.s, p4/M, z4.s, z16.s\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"  // store the four accumulated vectors and advance the channel offsets
+    "incw x28, ALL, MUL #4\n"
+    "st1w { z6.s }, p2, [%x[outptr], x27, LSL #2]\n"
+    "incw x27, ALL, MUL #4\n"
+    "st1w { z5.s }, p1, [%x[outptr], x26, LSL #2]\n"
+    "incw x26, ALL, MUL #4\n"
+    "st1w { z4.s }, p0, [%x[outptr], x25, LSL #2]\n"
+    "incw x25, ALL, MUL #4\n"
+    "whilelt p0.s, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.s, x28, %x[n_channels]\n"  // remainder path: same algorithm with a single accumulator (z7)
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z7.s, #0xff800000\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "fmax z19.s, p4/M, z19.s, z23.s\n"
+    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fmax z7.s, p4/M, z7.s, z19.s\n"
+    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"
+    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
+    "fmax z19.s, p4/M, z19.s, z23.s\n"
+    "fmax z7.s, p4/M, z7.s, z19.s\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fmax z7.s, p4/M, z7.s, z3.s\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "incw x28\n"
+    "whilelt p3.s, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..575977d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_s8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);  // window_cells sets the fixed-point rescale; n_valid_cells is the number of input rows actually summed
+
+struct sve_s8_nhwc_avg_generic_depthfirst  // Descriptor for the generic (any window size) int8 NHWC average-pool kernel (requires SVE2).
+{
+  typedef int8_t operand_type;  // input element type
+  typedef int8_t return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = sve_s8_nhwc_avg_generic_depthfirst_impl;
+
+  sve_s8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}  // CPUInfo is unused; the parameter keeps construction uniform across kernels
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..99321eb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;  // Q0.31 multiplier consumed by SQRDMULH, plus a non-positive shift for SRSHL (i.e. a rounding right shift)
+  };
+
+  constexpr RescaleParams rescale_params[8] = {  // precomputed fixed-point reciprocals 1/n for window sizes n = 2..9, indexed by n - 2
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sve_s8_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  __asm__ __volatile__(
+    "ptrue p4.b\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p2.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x24, %x[n_channels]\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z15.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z14.s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "mov z11.s, #0x0\n"
+    "mov z10.s, #0x0\n"
+    "mov z9.s, #0x0\n"
+    "mov z8.s, #0x0\n"
+    "mov z7.s, #0x0\n"
+    "mov z6.s, #0x0\n"
+    "mov z5.s, #0x0\n"
+    "mov z4.s, #0x0\n"
+    "mov z3.s, #0x0\n"
+    "mov z2.s, #0x0\n"
+    "mov z1.s, #0x0\n"
+    "mov z0.s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x4508a3b0  // sshllb z16.h, z29.b, #0x0\n"
+    ".inst 0x4590416b  // saddwb z11.s, z11.s, z16.h\n"
+    ".inst 0x4590454a  // saddwt z10.s, z10.s, z16.h\n"
+    ".inst 0x4508a7b0  // sshllt z16.h, z29.b, #0x0\n"
+    ".inst 0x45904129  // saddwb z9.s, z9.s, z16.h\n"
+    ".inst 0x45904508  // saddwt z8.s, z8.s, z16.h\n"
+    ".inst 0x4508a370  // sshllb z16.h, z27.b, #0x0\n"
+    ".inst 0x459040e7  // saddwb z7.s, z7.s, z16.h\n"
+    ".inst 0x459044c6  // saddwt z6.s, z6.s, z16.h\n"
+    ".inst 0x4508a770  // sshllt z16.h, z27.b, #0x0\n"
+    ".inst 0x459040a5  // saddwb z5.s, z5.s, z16.h\n"
+    ".inst 0x45904484  // saddwt z4.s, z4.s, z16.h\n"
+    ".inst 0x4508a330  // sshllb z16.h, z25.b, #0x0\n"
+    ".inst 0x45904063  // saddwb z3.s, z3.s, z16.h\n"
+    ".inst 0x45904442  // saddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508a730  // sshllt z16.h, z25.b, #0x0\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "mov z20.s, #0x7f\n"
+    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
+    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
+    "not z19.s, p4/M, z20.s\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x04b1756b  // sqrdmulh z11.s, z11.s, z17.s\n"
+    ".inst 0x04b1754a  // sqrdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x04b17529  // sqrdmulh z9.s, z9.s, z17.s\n"
+    ".inst 0x04b17508  // sqrdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x04b174e7  // sqrdmulh z7.s, z7.s, z17.s\n"
+    ".inst 0x04b174c6  // sqrdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x04b174a5  // sqrdmulh z5.s, z5.s, z17.s\n"
+    ".inst 0x04b17484  // sqrdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x04b17463  // sqrdmulh z3.s, z3.s, z17.s\n"
+    ".inst 0x04b17442  // sqrdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x04b17421  // sqrdmulh z1.s, z1.s, z17.s\n"
+    ".inst 0x04b17400  // sqrdmulh z0.s, z0.s, z17.s\n"
+    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
+    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
+    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
+    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
+    ".inst 0x4482920b  // srshl z11.s, p4/M, z11.s, z16.s\n"
+    ".inst 0x4482920a  // srshl z10.s, p4/M, z10.s, z16.s\n"
+    ".inst 0x44829209  // srshl z9.s, p4/M, z9.s, z16.s\n"
+    ".inst 0x44829208  // srshl z8.s, p4/M, z8.s, z16.s\n"
+    ".inst 0x44829207  // srshl z7.s, p4/M, z7.s, z16.s\n"
+    ".inst 0x44829206  // srshl z6.s, p4/M, z6.s, z16.s\n"
+    ".inst 0x44829205  // srshl z5.s, p4/M, z5.s, z16.s\n"
+    ".inst 0x44829204  // srshl z4.s, p4/M, z4.s, z16.s\n"
+    ".inst 0x44829203  // srshl z3.s, p4/M, z3.s, z16.s\n"
+    ".inst 0x44829202  // srshl z2.s, p4/M, z2.s, z16.s\n"
+    ".inst 0x44829201  // srshl z1.s, p4/M, z1.s, z16.s\n"
+    ".inst 0x44829200  // srshl z0.s, p4/M, z0.s, z16.s\n"
+    "smax z15.s, p4/M, z15.s, z19.s\n"
+    "smax z14.s, p4/M, z14.s, z19.s\n"
+    "smax z13.s, p4/M, z13.s, z19.s\n"
+    "smax z12.s, p4/M, z12.s, z19.s\n"
+    "smin z15.s, p4/M, z15.s, z20.s\n"
+    "smin z14.s, p4/M, z14.s, z20.s\n"
+    "smin z13.s, p4/M, z13.s, z20.s\n"
+    "smin z12.s, p4/M, z12.s, z20.s\n"
+    "smax z11.s, p4/M, z11.s, z19.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "smax z10.s, p4/M, z10.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smin z11.s, p4/M, z11.s, z20.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "smin z10.s, p4/M, z10.s, z20.s\n"
+    "incb x26, ALL, MUL #4\n"
+    "smax z9.s, p4/M, z9.s, z19.s\n"
+    "smax z8.s, p4/M, z8.s, z19.s\n"
+    "smax z7.s, p4/M, z7.s, z19.s\n"
+    "smax z6.s, p4/M, z6.s, z19.s\n"
+    "trn1 z18.h, z11.h, z10.h\n"
+    "smin z9.s, p4/M, z9.s, z20.s\n"
+    "smin z8.s, p4/M, z8.s, z20.s\n"
+    "smin z7.s, p4/M, z7.s, z20.s\n"
+    "smin z6.s, p4/M, z6.s, z20.s\n"
+    "smax z5.s, p4/M, z5.s, z19.s\n"
+    "trn1 z16.h, z9.h, z8.h\n"
+    "smax z4.s, p4/M, z4.s, z19.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "trn1 z16.b, z18.b, z16.b\n"
+    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+    "smin z5.s, p4/M, z5.s, z20.s\n"
+    "incb x25, ALL, MUL #4\n"
+    "smin z4.s, p4/M, z4.s, z20.s\n"
+    "smax z3.s, p4/M, z3.s, z19.s\n"
+    "smax z2.s, p4/M, z2.s, z19.s\n"
+    "smax z1.s, p4/M, z1.s, z19.s\n"
+    "smax z0.s, p4/M, z0.s, z19.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "smin z3.s, p4/M, z3.s, z20.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+    "smin z2.s, p4/M, z2.s, z20.s\n"
+    "incb x24, ALL, MUL #4\n"
+    "smin z1.s, p4/M, z1.s, z20.s\n"
+    "smin z0.s, p4/M, z0.s, z20.s\n"
+    "trn1 z17.h, z3.h, z2.h\n"
+    "trn1 z16.h, z1.h, z0.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z15.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z14.s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
+    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
+    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "mov z20.s, #0x7f\n"
+    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
+    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
+    "not z19.s, p4/M, z20.s\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
+    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
+    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
+    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
+    "smax z15.s, p4/M, z15.s, z19.s\n"
+    "smax z14.s, p4/M, z14.s, z19.s\n"
+    "smax z13.s, p4/M, z13.s, z19.s\n"
+    "smax z12.s, p4/M, z12.s, z19.s\n"
+    "smin z15.s, p4/M, z15.s, z20.s\n"
+    "smin z14.s, p4/M, z14.s, z20.s\n"
+    "smin z13.s, p4/M, z13.s, z20.s\n"
+    "smin z12.s, p4/M, z12.s, z20.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..071e79c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+// Kernel descriptor for the specialised SVE int8 NHWC max-pooling kernel:
+// 2x2 pooling window, stride 1, producing a 2x2 output tile per invocation,
+// depth-first traversal.  The pooling driver reads the static geometry
+// accessors below to select this kernel and then calls `kernel`.
+struct sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  typedef int8_t operand_type;  // element type read from the input tensor
+  typedef int8_t return_type;   // element type written to the output tensor
+
+  // Implementation signature: (n_channels, inptrs, outptrs, exclude_padding,
+  // pad_left, pad_top, pad_right, pad_bottom).  The padding arguments are
+  // part of the common depth-first kernel interface; max pooling does not
+  // need them (see the _impl, which discards them).
+  typedef void (*kern_type)(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  // Pooling window: 2x2.
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+
+  // Window stride: 1 in each direction.
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+
+  // Output tile computed per kernel call: 2x2.
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+
+  kern_type kernel = sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+
+  // CPUInfo is accepted for interface uniformity with other kernels; this
+  // specialisation has no CPU-dependent configuration.
+  sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..06c777b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+// Computes a 2x2 output tile of 2x2 stride-1 max pooling over NHWC int8
+// data, one full SVE vector of channels per loop iteration (WHILELT
+// predication handles the channel tail).  Nine input row pointers are read
+// from `inptrs` (presumably the 3x3 input patch that covers four
+// overlapping 2x2 windows — TODO confirm against the driver) and four
+// output pointers from `outptrs`.  The padding/exclusion arguments are
+// accepted only for interface compatibility and are ignored: the KernelArgs
+// constructor below discards them, since a max reduction needs no
+// padded-element bookkeeping.
+void sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  // Marshals the used arguments into a single struct so the assembly can
+  // address them via one base register plus offsetof() immediates.
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const int8_t *const *const inptrs;
+    int8_t *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const int8_t *const *input_ptrs,
+      int8_t *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  // Software-pipelined loop: loads for the next vector of channels are
+  // interleaved with the SMAX reduction of the current one, with a
+  // load-free tail copy at label 2.
+  __asm__ __volatile__(
+    "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "mov x12, #0x0\n"
+    "ldp x11, x10, [x19, #0x0]\n"
+    "whilelt p1.b, x13, x14\n"
+    "ldp x9, x28, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ldr x19, [x19, #0x40]\n"
+    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
+    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
+    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
+    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
+    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
+    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
+    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
+    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
+    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "incw x13\n"
+    "whilelt p1.b, x13, x14\n"
+    "b.none 2f\n"
+    "1:"  // Vector: Loop
+    "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z30.b\n"
+    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
+    "whilelt p0.b, x12, x14\n"
+    "movprfx z21, z30\n smax z21.b, p2/M, z21.b, z29.b\n"
+    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
+    "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z27.b\n"
+    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
+    "movprfx z20, z26\n smax z20.b, p2/M, z20.b, z25.b\n"
+    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
+    "movprfx z17, z24\n smax z17.b, p2/M, z17.b, z28.b\n"
+    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
+    "movprfx z16, z26\n smax z16.b, p2/M, z16.b, z23.b\n"
+    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
+    "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
+    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
+    "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z20.b\n"
+    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
+    "smax z17.b, p2/M, z17.b, z21.b\n"
+    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "incw x13\n"
+    "smax z16.b, p2/M, z16.b, z21.b\n"
+    "st1b { z19.b }, p0, [x11, x12]\n"
+    "whilelt p1.b, x13, x14\n"
+    "st1b { z18.b }, p0, [x10, x12]\n"
+    "st1b { z17.b }, p0, [x9, x12]\n"
+    "st1b { z16.b }, p0, [x28, x12]\n"
+    "incw x12\n"
+    "b.any 1b\n"
+    "2:"  // Vector: Tail
+    "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z30.b\n"
+    "whilelt p0.b, x12, x14\n"
+    "movprfx z21, z30\n smax z21.b, p2/M, z21.b, z29.b\n"
+    "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z27.b\n"
+    "movprfx z20, z26\n smax z20.b, p2/M, z20.b, z25.b\n"
+    "movprfx z17, z24\n smax z17.b, p2/M, z17.b, z28.b\n"
+    "movprfx z16, z26\n smax z16.b, p2/M, z16.b, z23.b\n"
+    "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
+    "st1b { z19.b }, p0, [x11, x12]\n"
+    "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z20.b\n"
+    "smax z17.b, p2/M, z17.b, z21.b\n"
+    "st1b { z18.b }, p0, [x10, x12]\n"
+    "smax z16.b, p2/M, z16.b, z21.b\n"
+    "st1b { z17.b }, p0, [x9, x12]\n"
+    "st1b { z16.b }, p0, [x28, x12]\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..7490a92
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_s8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+// Kernel descriptor for the generic (any window size) SVE int8 NHWC
+// max-pooling kernel.  Unlike the specialised 2x2 variant, it exposes no
+// static geometry: the implementation takes the number of valid window
+// cells at runtime.
+struct sve_s8_nhwc_max_generic_depthfirst
+{
+  typedef int8_t operand_type;  // element type read from the input tensor
+  typedef int8_t return_type;   // element type written to the output tensor
+
+  // Implementation signature: (window_cells (unused by max), n_valid_cells,
+  // n_channels, inptrs, outptr).
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = sve_s8_nhwc_max_generic_depthfirst_impl;
+
+  // CPUInfo is accepted for interface uniformity; no CPU-specific state.
+  sve_s8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..5c4c18b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+
+// Generic int8 NHWC max pooling: reduces `n_valid_cells` input pointers
+// (one per valid pooling-window cell) into one output row of `n_channels`
+// bytes at `outptr`.  The main path (label 1) processes four SVE vectors of
+// channels per pass and four cells per inner iteration, with a
+// one-cell-at-a-time tail (label 5); remaining channels are handled one
+// vector at a time (labels 7-13).  Accumulators are seeded with 0x80
+// (INT8_MIN), the identity element for a signed-byte max, and WHILELT
+// predication masks the channel tail.  The first (window_cells) parameter
+// is unused: max pooling depends only on the valid cells.
+void sve_s8_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr
+)
+{
+  __asm__ __volatile__(
+    "ptrue p4.b\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p2.b, x27, %x[n_channels]\n"
+    "whilelt p1.b, x26, %x[n_channels]\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z7.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z6.b, #0x80\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x80\n"
+    "mov z4.b, #0x80\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+    "add x19, x19, #0x20\n"
+    "smax z22.b, p4/M, z22.b, z29.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+    "smax z21.b, p4/M, z21.b, z26.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "smax z16.b, p4/M, z16.b, z25.b\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "smax z20.b, p4/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "smax z18.b, p4/M, z18.b, z22.b\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "smax z17.b, p4/M, z17.b, z21.b\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "smax z16.b, p4/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "smax z7.b, p4/M, z7.b, z19.b\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "smax z6.b, p4/M, z6.b, z18.b\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "smax z5.b, p4/M, z5.b, z17.b\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "smax z4.b, p4/M, z4.b, z16.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+    "smax z22.b, p4/M, z22.b, z29.b\n"
+    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+    "smax z21.b, p4/M, z21.b, z26.b\n"
+    "smax z16.b, p4/M, z16.b, z25.b\n"
+    "smax z20.b, p4/M, z20.b, z24.b\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "smax z18.b, p4/M, z18.b, z22.b\n"
+    "smax z17.b, p4/M, z17.b, z21.b\n"
+    "smax z16.b, p4/M, z16.b, z20.b\n"
+    "smax z7.b, p4/M, z7.b, z19.b\n"
+    "smax z6.b, p4/M, z6.b, z18.b\n"
+    "smax z5.b, p4/M, z5.b, z17.b\n"
+    "smax z4.b, p4/M, z4.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "smax z7.b, p4/M, z7.b, z3.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "smax z6.b, p4/M, z6.b, z31.b\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "smax z5.b, p4/M, z5.b, z28.b\n"
+    "smax z4.b, p4/M, z4.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "incb x28, ALL, MUL #4\n"
+    "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+    "incb x27, ALL, MUL #4\n"
+    "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "st1b { z4.b }, p0, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z7.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "smax z7.b, p4/M, z7.b, z19.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "smax z7.b, p4/M, z7.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "smax z7.b, p4/M, z7.b, z3.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..8eb7a39
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_s8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
+
+// Kernel descriptor for the generic SVE2 quantised-int8 (s8q) NHWC average
+// pooling kernel.  Average pooling additionally needs the requantisation
+// parameters, hence the Requantize32 argument in the signature.
+struct sve_s8q_nhwc_avg_generic_depthfirst
+{
+  typedef int8_t operand_type;  // element type read from the input tensor
+  typedef int8_t return_type;   // element type written to the output tensor
+
+  // Implementation signature: (window_cells, n_valid_cells, n_channels,
+  // inptrs, outptr, requantisation parameters).
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = sve_s8q_nhwc_avg_generic_depthfirst_impl;
+
+  // CPUInfo is accepted for interface uniformity; no CPU-specific state.
+  sve_s8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..51d2973
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  // Fixed-point reciprocal of the window size, represented as
+  // multiplier * 2^shift / 2^31 (multiplier normalised into [0.5, 1.0)).
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  // Precomputed reciprocals for the common window sizes 2..9; other sizes
+  // fall back to the runtime computation in the kernel below.
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+// Average pooling over an arbitrary window for s8 quantized NHWC data (SVE2).
+//
+// Accumulates `n_valid_cells` int8 inputs per channel into 32-bit sums, then
+// requantizes with a single fixed-point multiplier that fuses the
+// 1/window_cells scale with the per-layer requantization in `qp`, saturating
+// the result back to int8.
+//
+//   window_cells  - total cell count of the pooling window (the divisor)
+//   n_valid_cells - number of in-bounds input pointers provided in `inptrs`
+//   n_channels    - number of channels (bytes) to produce
+//   inptrs        - pointers to the input cells (NHWC layout)
+//   outptr        - destination for `n_channels` int8 outputs
+//   qp            - requantization parameters (multiplier and shifts)
+void sve_s8q_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  // (reciprocal of window_cells as multiplier * 2^shift / 2^31)
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    // Fall back to computing a normalised fixed-point reciprocal at runtime.
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      // Rounding carried up to exactly 2^31; renormalise so the multiplier
+      // still fits in a signed 32-bit value.
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  // Combine together the rescale value for the requantization and the scaling
+  // factor for the average pool.
+  const int32_t shift = qp.per_layer_left_shift - qp.per_layer_right_shift + shift_value;
+  const int32_t left_shift = shift > 0 ? shift : 0;
+  const int32_t right_shift = shift <= 0 ? shift : 0;
+
+  // Fuse the two fixed-point multipliers with a scalar saturating rounding
+  // doubling multiply (SQRDMULH); only lane 0 of v16-v18 is used.
+  int32_t combined_rescale_value = 0;
+  __asm__ __volatile__ (
+      "mov v16.s[0], %w[per_layer_mul]\n"
+      "mov v17.s[0], %w[rescale_value]\n"
+      "sqrdmulh s18, s16, s17\n"
+      "mov %w[combined_rescale_value], v18.s[0]\n"
+    : [combined_rescale_value] "=r" (combined_rescale_value)
+    : [per_layer_mul] "r" (qp.per_layer_mul), [rescale_value] "r" (rescale_value)
+    : "q16", "q17", "q18"
+  );
+
+  // Generated SVE2 assembly. Main path handles four vectors of channels per
+  // iteration: int8 inputs are pairwise widened and summed into int32
+  // accumulators (saddlb/saddlt + saddwb/saddwt), then requantized
+  // (srshl / sqrdmulh / srshl), clamped to [-128, 127] and narrowed back to
+  // int8 via trn1; a single-vector loop (labels 8-13) handles the remainder.
+  __asm__ __volatile__(
+    "ptrue p4.b\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p2.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x24, %x[n_channels]\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z15.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z14.s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "mov z11.s, #0x0\n"
+    "mov z10.s, #0x0\n"
+    "mov z9.s, #0x0\n"
+    "mov z8.s, #0x0\n"
+    "mov z7.s, #0x0\n"
+    "mov z6.s, #0x0\n"
+    "mov z5.s, #0x0\n"
+    "mov z4.s, #0x0\n"
+    "mov z3.s, #0x0\n"
+    "mov z2.s, #0x0\n"
+    "mov z1.s, #0x0\n"
+    "mov z0.s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x4508a3b0  // sshllb z16.h, z29.b, #0x0\n"
+    ".inst 0x4590416b  // saddwb z11.s, z11.s, z16.h\n"
+    ".inst 0x4590454a  // saddwt z10.s, z10.s, z16.h\n"
+    ".inst 0x4508a7b0  // sshllt z16.h, z29.b, #0x0\n"
+    ".inst 0x45904129  // saddwb z9.s, z9.s, z16.h\n"
+    ".inst 0x45904508  // saddwt z8.s, z8.s, z16.h\n"
+    ".inst 0x4508a370  // sshllb z16.h, z27.b, #0x0\n"
+    ".inst 0x459040e7  // saddwb z7.s, z7.s, z16.h\n"
+    ".inst 0x459044c6  // saddwt z6.s, z6.s, z16.h\n"
+    ".inst 0x4508a770  // sshllt z16.h, z27.b, #0x0\n"
+    ".inst 0x459040a5  // saddwb z5.s, z5.s, z16.h\n"
+    ".inst 0x45904484  // saddwt z4.s, z4.s, z16.h\n"
+    ".inst 0x4508a330  // sshllb z16.h, z25.b, #0x0\n"
+    ".inst 0x45904063  // saddwb z3.s, z3.s, z16.h\n"
+    ".inst 0x45904442  // saddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508a730  // sshllt z16.h, z25.b, #0x0\n"
+    ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "mov z20.s, #0x7f\n"
+    "ld1rw { z18.s }, p4/Z, [%x[combined_rescale_value]]\n"
+    "ld1rw { z17.s }, p4/Z, [%x[left_shift]]\n"
+    "not z19.s, p4/M, z20.s\n"
+    "ld1rw { z16.s }, p4/Z, [%x[right_shift]]\n"
+    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
+    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
+    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
+    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
+    ".inst 0x4482922b  // srshl z11.s, p4/M, z11.s, z17.s\n"
+    ".inst 0x04b275ef  // sqrdmulh z15.s, z15.s, z18.s\n"
+    ".inst 0x04b275ce  // sqrdmulh z14.s, z14.s, z18.s\n"
+    ".inst 0x04b275ad  // sqrdmulh z13.s, z13.s, z18.s\n"
+    ".inst 0x04b2758c  // sqrdmulh z12.s, z12.s, z18.s\n"
+    ".inst 0x04b2756b  // sqrdmulh z11.s, z11.s, z18.s\n"
+    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
+    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
+    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
+    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
+    ".inst 0x4482920b  // srshl z11.s, p4/M, z11.s, z16.s\n"
+    ".inst 0x4482922a  // srshl z10.s, p4/M, z10.s, z17.s\n"
+    ".inst 0x44829229  // srshl z9.s, p4/M, z9.s, z17.s\n"
+    ".inst 0x44829228  // srshl z8.s, p4/M, z8.s, z17.s\n"
+    ".inst 0x44829227  // srshl z7.s, p4/M, z7.s, z17.s\n"
+    ".inst 0x04b2754a  // sqrdmulh z10.s, z10.s, z18.s\n"
+    ".inst 0x04b27529  // sqrdmulh z9.s, z9.s, z18.s\n"
+    ".inst 0x04b27508  // sqrdmulh z8.s, z8.s, z18.s\n"
+    ".inst 0x04b274e7  // sqrdmulh z7.s, z7.s, z18.s\n"
+    ".inst 0x4482920a  // srshl z10.s, p4/M, z10.s, z16.s\n"
+    ".inst 0x44829209  // srshl z9.s, p4/M, z9.s, z16.s\n"
+    ".inst 0x44829208  // srshl z8.s, p4/M, z8.s, z16.s\n"
+    ".inst 0x44829207  // srshl z7.s, p4/M, z7.s, z16.s\n"
+    ".inst 0x44829226  // srshl z6.s, p4/M, z6.s, z17.s\n"
+    ".inst 0x44829225  // srshl z5.s, p4/M, z5.s, z17.s\n"
+    ".inst 0x44829224  // srshl z4.s, p4/M, z4.s, z17.s\n"
+    ".inst 0x44829223  // srshl z3.s, p4/M, z3.s, z17.s\n"
+    ".inst 0x04b274c6  // sqrdmulh z6.s, z6.s, z18.s\n"
+    ".inst 0x04b274a5  // sqrdmulh z5.s, z5.s, z18.s\n"
+    ".inst 0x04b27484  // sqrdmulh z4.s, z4.s, z18.s\n"
+    ".inst 0x04b27463  // sqrdmulh z3.s, z3.s, z18.s\n"
+    ".inst 0x44829206  // srshl z6.s, p4/M, z6.s, z16.s\n"
+    ".inst 0x44829205  // srshl z5.s, p4/M, z5.s, z16.s\n"
+    ".inst 0x44829204  // srshl z4.s, p4/M, z4.s, z16.s\n"
+    ".inst 0x44829203  // srshl z3.s, p4/M, z3.s, z16.s\n"
+    ".inst 0x44829222  // srshl z2.s, p4/M, z2.s, z17.s\n"
+    ".inst 0x44829221  // srshl z1.s, p4/M, z1.s, z17.s\n"
+    ".inst 0x44829220  // srshl z0.s, p4/M, z0.s, z17.s\n"
+    "smax z15.s, p4/M, z15.s, z19.s\n"
+    ".inst 0x04b27442  // sqrdmulh z2.s, z2.s, z18.s\n"
+    ".inst 0x04b27421  // sqrdmulh z1.s, z1.s, z18.s\n"
+    ".inst 0x04b27400  // sqrdmulh z0.s, z0.s, z18.s\n"
+    "smin z15.s, p4/M, z15.s, z20.s\n"
+    ".inst 0x44829202  // srshl z2.s, p4/M, z2.s, z16.s\n"
+    ".inst 0x44829201  // srshl z1.s, p4/M, z1.s, z16.s\n"
+    ".inst 0x44829200  // srshl z0.s, p4/M, z0.s, z16.s\n"
+    "smax z14.s, p4/M, z14.s, z19.s\n"
+    "smax z13.s, p4/M, z13.s, z19.s\n"
+    "smax z12.s, p4/M, z12.s, z19.s\n"
+    "smax z11.s, p4/M, z11.s, z19.s\n"
+    "smin z14.s, p4/M, z14.s, z20.s\n"
+    "smin z13.s, p4/M, z13.s, z20.s\n"
+    "smin z12.s, p4/M, z12.s, z20.s\n"
+    "smin z11.s, p4/M, z11.s, z20.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "smax z10.s, p4/M, z10.s, z19.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smax z9.s, p4/M, z9.s, z19.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "smin z10.s, p4/M, z10.s, z20.s\n"
+    "incb x26, ALL, MUL #4\n"
+    "smin z9.s, p4/M, z9.s, z20.s\n"
+    "smax z8.s, p4/M, z8.s, z19.s\n"
+    "smax z7.s, p4/M, z7.s, z19.s\n"
+    "smax z6.s, p4/M, z6.s, z19.s\n"
+    "trn1 z18.h, z11.h, z10.h\n"
+    "smin z8.s, p4/M, z8.s, z20.s\n"
+    "smin z7.s, p4/M, z7.s, z20.s\n"
+    "smin z6.s, p4/M, z6.s, z20.s\n"
+    "smax z5.s, p4/M, z5.s, z19.s\n"
+    "trn1 z16.h, z9.h, z8.h\n"
+    "smax z4.s, p4/M, z4.s, z19.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "trn1 z16.b, z18.b, z16.b\n"
+    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+    "smin z5.s, p4/M, z5.s, z20.s\n"
+    "incb x25, ALL, MUL #4\n"
+    "smin z4.s, p4/M, z4.s, z20.s\n"
+    "smax z3.s, p4/M, z3.s, z19.s\n"
+    "smax z2.s, p4/M, z2.s, z19.s\n"
+    "smax z1.s, p4/M, z1.s, z19.s\n"
+    "smax z0.s, p4/M, z0.s, z19.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "smin z3.s, p4/M, z3.s, z20.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+    "smin z2.s, p4/M, z2.s, z20.s\n"
+    "incb x24, ALL, MUL #4\n"
+    "smin z1.s, p4/M, z1.s, z20.s\n"
+    "smin z0.s, p4/M, z0.s, z20.s\n"
+    "trn1 z17.h, z3.h, z2.h\n"
+    "trn1 z16.h, z1.h, z0.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z15.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z14.s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
+    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
+    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "mov z20.s, #0x7f\n"
+    "ld1rw { z18.s }, p4/Z, [%x[combined_rescale_value]]\n"
+    "ld1rw { z17.s }, p4/Z, [%x[left_shift]]\n"
+    "not z19.s, p4/M, z20.s\n"
+    "ld1rw { z16.s }, p4/Z, [%x[right_shift]]\n"
+    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
+    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
+    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
+    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
+    ".inst 0x04b275ef  // sqrdmulh z15.s, z15.s, z18.s\n"
+    ".inst 0x04b275ce  // sqrdmulh z14.s, z14.s, z18.s\n"
+    ".inst 0x04b275ad  // sqrdmulh z13.s, z13.s, z18.s\n"
+    ".inst 0x04b2758c  // sqrdmulh z12.s, z12.s, z18.s\n"
+    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
+    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
+    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
+    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
+    "smax z15.s, p4/M, z15.s, z19.s\n"
+    "smax z14.s, p4/M, z14.s, z19.s\n"
+    "smax z13.s, p4/M, z13.s, z19.s\n"
+    "smax z12.s, p4/M, z12.s, z19.s\n"
+    "smin z15.s, p4/M, z15.s, z20.s\n"
+    "smin z14.s, p4/M, z14.s, z20.s\n"
+    "smin z13.s, p4/M, z13.s, z20.s\n"
+    "smin z12.s, p4/M, z12.s, z20.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [right_shift] "r" (&right_shift)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..fd8b2f8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+// Generic-window max-pooling kernel for signed 8-bit quantized (s8q) NHWC
+// data on SVE2. The first (unnamed) parameter is the window cell count,
+// which max pooling does not use.
+void sve_s8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
+
+// Kernel descriptor consumed by the depthfirst pooling driver: exposes the
+// operand/return element types, the pooling type, and a pointer to the
+// implementation above.
+struct sve_s8q_nhwc_max_generic_depthfirst
+{
+  typedef int8_t operand_type;
+  typedef int8_t return_type;
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = sve_s8q_nhwc_max_generic_depthfirst_impl;
+
+  // CPUInfo is unused here; the parameter is kept so all kernel descriptors
+  // share a uniform constructor signature.
+  sve_s8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..54f694c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sve_s8q_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const int8_t *const *const inptrs,
+  int8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  __asm__ __volatile__(
+    "ptrue p4.b\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p2.b, x27, %x[n_channels]\n"
+    "whilelt p1.b, x26, %x[n_channels]\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z8.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z7.b, #0x80\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z6.b, #0x80\n"
+    "mov z5.b, #0x80\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+    "add x19, x19, #0x20\n"
+    "smax z22.b, p4/M, z22.b, z29.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+    "smax z21.b, p4/M, z21.b, z26.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "smax z16.b, p4/M, z16.b, z25.b\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "smax z20.b, p4/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "smax z18.b, p4/M, z18.b, z22.b\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "smax z17.b, p4/M, z17.b, z21.b\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "smax z16.b, p4/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "smax z8.b, p4/M, z8.b, z19.b\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "smax z7.b, p4/M, z7.b, z18.b\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "smax z6.b, p4/M, z6.b, z17.b\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "smax z5.b, p4/M, z5.b, z16.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+    "smax z22.b, p4/M, z22.b, z29.b\n"
+    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+    "smax z21.b, p4/M, z21.b, z26.b\n"
+    "smax z16.b, p4/M, z16.b, z25.b\n"
+    "smax z20.b, p4/M, z20.b, z24.b\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "smax z18.b, p4/M, z18.b, z22.b\n"
+    "smax z17.b, p4/M, z17.b, z21.b\n"
+    "smax z16.b, p4/M, z16.b, z20.b\n"
+    "smax z8.b, p4/M, z8.b, z19.b\n"
+    "smax z7.b, p4/M, z7.b, z18.b\n"
+    "smax z6.b, p4/M, z6.b, z17.b\n"
+    "smax z5.b, p4/M, z5.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "smax z8.b, p4/M, z8.b, z3.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "smax z7.b, p4/M, z7.b, z31.b\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "smax z6.b, p4/M, z6.b, z28.b\n"
+    "smax z5.b, p4/M, z5.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "mov z4.s, #0x7f\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z3.s }, p4/Z, [x19]\n"
+    ".inst 0x4508a111  // sshllb z17.h, z8.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    ".inst 0x4508a510  // sshllt z16.h, z8.b, #0x0\n"
+    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    ".inst 0x4508a0f2  // sshllb z18.h, z7.b, #0x0\n"
+    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    ".inst 0x4508a4f7  // sshllt z23.h, z7.b, #0x0\n"
+    ".inst 0x4508a0d6  // sshllb z22.h, z6.b, #0x0\n"
+    ".inst 0x4508a4d5  // sshllt z21.h, z6.b, #0x0\n"
+    ".inst 0x4508a0b4  // sshllb z20.h, z5.b, #0x0\n"
+    ".inst 0x4508a4b3  // sshllt z19.h, z5.b, #0x0\n"
+    ".inst 0x4510a220  // sshllb z0.s, z17.h, #0x0\n"
+    ".inst 0x4510a631  // sshllt z17.s, z17.h, #0x0\n"
+    ".inst 0x4510a21f  // sshllb z31.s, z16.h, #0x0\n"
+    ".inst 0x4510a610  // sshllt z16.s, z16.h, #0x0\n"
+    ".inst 0x4510a25e  // sshllb z30.s, z18.h, #0x0\n"
+    ".inst 0x4510a652  // sshllt z18.s, z18.h, #0x0\n"
+    ".inst 0x4510a2fd  // sshllb z29.s, z23.h, #0x0\n"
+    ".inst 0x4510a6fc  // sshllt z28.s, z23.h, #0x0\n"
+    ".inst 0x4510a2db  // sshllb z27.s, z22.h, #0x0\n"
+    ".inst 0x4510a6da  // sshllt z26.s, z22.h, #0x0\n"
+    ".inst 0x4510a2b9  // sshllb z25.s, z21.h, #0x0\n"
+    ".inst 0x4510a6b8  // sshllt z24.s, z21.h, #0x0\n"
+    ".inst 0x4510a297  // sshllb z23.s, z20.h, #0x0\n"
+    ".inst 0x4510a696  // sshllt z22.s, z20.h, #0x0\n"
+    ".inst 0x4510a275  // sshllb z21.s, z19.h, #0x0\n"
+    ".inst 0x4510a674  // sshllt z20.s, z19.h, #0x0\n"
+    ".inst 0x44829040  // srshl z0.s, p4/M, z0.s, z2.s\n"
+    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
+    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"
+    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
+    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
+    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
+    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
+    ".inst 0x44829020  // srshl z0.s, p4/M, z0.s, z1.s\n"
+    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
+    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
+    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
+    ".inst 0x4482905e  // srshl z30.s, p4/M, z30.s, z2.s\n"
+    ".inst 0x44829052  // srshl z18.s, p4/M, z18.s, z2.s\n"
+    ".inst 0x4482905d  // srshl z29.s, p4/M, z29.s, z2.s\n"
+    ".inst 0x4482905c  // srshl z28.s, p4/M, z28.s, z2.s\n"
+    ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
+    ".inst 0x04a37652  // sqrdmulh z18.s, z18.s, z3.s\n"
+    ".inst 0x04a377bd  // sqrdmulh z29.s, z29.s, z3.s\n"
+    ".inst 0x04a3779c  // sqrdmulh z28.s, z28.s, z3.s\n"
+    ".inst 0x4482903e  // srshl z30.s, p4/M, z30.s, z1.s\n"
+    ".inst 0x44829032  // srshl z18.s, p4/M, z18.s, z1.s\n"
+    ".inst 0x4482903d  // srshl z29.s, p4/M, z29.s, z1.s\n"
+    ".inst 0x4482903c  // srshl z28.s, p4/M, z28.s, z1.s\n"
+    ".inst 0x4482905b  // srshl z27.s, p4/M, z27.s, z2.s\n"
+    ".inst 0x4482905a  // srshl z26.s, p4/M, z26.s, z2.s\n"
+    ".inst 0x44829059  // srshl z25.s, p4/M, z25.s, z2.s\n"
+    ".inst 0x44829058  // srshl z24.s, p4/M, z24.s, z2.s\n"
+    ".inst 0x04a3777b  // sqrdmulh z27.s, z27.s, z3.s\n"
+    ".inst 0x04a3775a  // sqrdmulh z26.s, z26.s, z3.s\n"
+    ".inst 0x04a37739  // sqrdmulh z25.s, z25.s, z3.s\n"
+    ".inst 0x04a37718  // sqrdmulh z24.s, z24.s, z3.s\n"
+    ".inst 0x4482903b  // srshl z27.s, p4/M, z27.s, z1.s\n"
+    ".inst 0x4482903a  // srshl z26.s, p4/M, z26.s, z1.s\n"
+    ".inst 0x44829039  // srshl z25.s, p4/M, z25.s, z1.s\n"
+    ".inst 0x44829038  // srshl z24.s, p4/M, z24.s, z1.s\n"
+    ".inst 0x44829057  // srshl z23.s, p4/M, z23.s, z2.s\n"
+    ".inst 0x44829056  // srshl z22.s, p4/M, z22.s, z2.s\n"
+    ".inst 0x44829055  // srshl z21.s, p4/M, z21.s, z2.s\n"
+    ".inst 0x44829054  // srshl z20.s, p4/M, z20.s, z2.s\n"
+    ".inst 0x04a376f7  // sqrdmulh z23.s, z23.s, z3.s\n"
+    ".inst 0x04a376d6  // sqrdmulh z22.s, z22.s, z3.s\n"
+    ".inst 0x04a376b5  // sqrdmulh z21.s, z21.s, z3.s\n"
+    ".inst 0x04a37694  // sqrdmulh z20.s, z20.s, z3.s\n"
+    ".inst 0x44829037  // srshl z23.s, p4/M, z23.s, z1.s\n"
+    ".inst 0x44829036  // srshl z22.s, p4/M, z22.s, z1.s\n"
+    ".inst 0x44829035  // srshl z21.s, p4/M, z21.s, z1.s\n"
+    ".inst 0x44829034  // srshl z20.s, p4/M, z20.s, z1.s\n"
+    "not z19.s, p4/M, z4.s\n"
+    "smax z0.s, p4/M, z0.s, z19.s\n"
+    "smax z17.s, p4/M, z17.s, z19.s\n"
+    "smax z31.s, p4/M, z31.s, z19.s\n"
+    "smax z16.s, p4/M, z16.s, z19.s\n"
+    "smin z0.s, p4/M, z0.s, z4.s\n"
+    "smin z17.s, p4/M, z17.s, z4.s\n"
+    "smin z31.s, p4/M, z31.s, z4.s\n"
+    "smin z16.s, p4/M, z16.s, z4.s\n"
+    "smax z30.s, p4/M, z30.s, z19.s\n"
+    "trn1 z17.h, z0.h, z17.h\n"
+    "smax z18.s, p4/M, z18.s, z19.s\n"
+    "trn1 z16.h, z31.h, z16.h\n"
+    "smin z30.s, p4/M, z30.s, z4.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+    "smin z18.s, p4/M, z18.s, z4.s\n"
+    "incb x28, ALL, MUL #4\n"
+    "smax z29.s, p4/M, z29.s, z19.s\n"
+    "smax z28.s, p4/M, z28.s, z19.s\n"
+    "smax z27.s, p4/M, z27.s, z19.s\n"
+    "smax z26.s, p4/M, z26.s, z19.s\n"
+    "trn1 z18.h, z30.h, z18.h\n"
+    "smin z29.s, p4/M, z29.s, z4.s\n"
+    "smin z28.s, p4/M, z28.s, z4.s\n"
+    "smin z27.s, p4/M, z27.s, z4.s\n"
+    "smin z26.s, p4/M, z26.s, z4.s\n"
+    "smax z25.s, p4/M, z25.s, z19.s\n"
+    "trn1 z16.h, z29.h, z28.h\n"
+    "smax z24.s, p4/M, z24.s, z19.s\n"
+    "trn1 z17.h, z27.h, z26.h\n"
+    "trn1 z16.b, z18.b, z16.b\n"
+    "st1b { z16.b }, p2, [%x[outptr], x27]\n"
+    "smin z25.s, p4/M, z25.s, z4.s\n"
+    "incb x27, ALL, MUL #4\n"
+    "smin z24.s, p4/M, z24.s, z4.s\n"
+    "smax z23.s, p4/M, z23.s, z19.s\n"
+    "smax z22.s, p4/M, z22.s, z19.s\n"
+    "smax z21.s, p4/M, z21.s, z19.s\n"
+    "smax z20.s, p4/M, z20.s, z19.s\n"
+    "trn1 z16.h, z25.h, z24.h\n"
+    "smin z23.s, p4/M, z23.s, z4.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x26]\n"
+    "smin z22.s, p4/M, z22.s, z4.s\n"
+    "incb x26, ALL, MUL #4\n"
+    "smin z21.s, p4/M, z21.s, z4.s\n"
+    "smin z20.s, p4/M, z20.s, z4.s\n"
+    "trn1 z17.h, z23.h, z22.h\n"
+    "trn1 z16.h, z21.h, z20.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p0, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z8.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "smax z8.b, p4/M, z8.b, z19.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
+    "smax z19.b, p4/M, z19.b, z23.b\n"
+    "smax z8.b, p4/M, z8.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "smax z8.b, p4/M, z8.b, z3.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "mov z4.s, #0x7f\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z3.s }, p4/Z, [x19]\n"
+    ".inst 0x4508a111  // sshllb z17.h, z8.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    ".inst 0x4508a510  // sshllt z16.h, z8.b, #0x0\n"
+    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    ".inst 0x4510a220  // sshllb z0.s, z17.h, #0x0\n"
+    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    ".inst 0x4510a631  // sshllt z17.s, z17.h, #0x0\n"
+    ".inst 0x4510a21f  // sshllb z31.s, z16.h, #0x0\n"
+    ".inst 0x4510a610  // sshllt z16.s, z16.h, #0x0\n"
+    ".inst 0x44829040  // srshl z0.s, p4/M, z0.s, z2.s\n"
+    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
+    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"
+    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
+    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
+    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
+    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
+    ".inst 0x44829020  // srshl z0.s, p4/M, z0.s, z1.s\n"
+    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
+    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
+    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
+    "not z19.s, p4/M, z4.s\n"
+    "smax z0.s, p4/M, z0.s, z19.s\n"
+    "smax z17.s, p4/M, z17.s, z19.s\n"
+    "smax z31.s, p4/M, z31.s, z19.s\n"
+    "smax z16.s, p4/M, z16.s, z19.s\n"
+    "smin z0.s, p4/M, z0.s, z4.s\n"
+    "smin z17.s, p4/M, z17.s, z4.s\n"
+    "smin z31.s, p4/M, z31.s, z4.s\n"
+    "smin z16.s, p4/M, z16.s, z4.s\n"
+    "trn1 z17.h, z0.h, z17.h\n"
+    "trn1 z16.h, z31.h, z16.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..e9b3625
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_u8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+// Kernel descriptor binding the SVE2 generic-window uint8 NHWC average-pooling implementation (declared above) to the depthfirst pooling framework.
+struct sve_u8_nhwc_avg_generic_depthfirst
+{
+  typedef uint8_t operand_type;  // element type read from the input tensor
+  typedef uint8_t return_type;  // element type written to the output tensor
+  // Entry-point signature: (window_cells, n_valid_cells, n_channels, inptrs, outptr).
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+  // Identifies this kernel to the framework as an average-pooling variant.
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+  // Function pointer invoked by the framework; defaults to the implementation declared above.
+  kern_type kernel = sve_u8_nhwc_avg_generic_depthfirst_impl;
+  // CPUInfo is accepted for interface uniformity but unused: no CPU-specific configuration here.
+  sve_u8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..85d7145
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {  // file-local helpers for the 1/window_cells fixed-point rescale
+  struct RescaleParams  // fixed-point reciprocal: value = (multiplier / 2^31) * 2^shift
+  {
+    int32_t multiplier, shift;  // multiplier is Q31 in [0.5, 1); shift is non-positive
+  };
+  // Precomputed reciprocals of 2..9, indexed by (window_cells - 2); consumed via sqrdmulh + srshl in the asm.
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sve_u8_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,  // total cells in the pooling window (the divisor of the average)
+  const uint64_t n_valid_cells,  // cells actually available, i.e. not cut off by padding
+  uint64_t n_channels,  // number of uint8 channels to produce per output point
+  const uint8_t *const *const inptrs,  // n_valid_cells pointers to the input cells' channel data
+  uint8_t *outptr  // destination: n_channels uint8 averaged values
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;  // Q31 multiplier and non-positive shift: average = acc * rescale * 2^shift
+  if (2 <= window_cells && window_cells <= 9)  // common window sizes come straight from the precomputed table
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);  // otherwise derive multiplier/shift at runtime
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)  // normalise the mantissa into [0.5, 1)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))  // NOTE(review): rescale_value is int32_t, so this compare looks unreachable; defensive guard — confirm intent
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+  __asm__ __volatile__(  // widen-accumulate u8 pairs into s32, rescale with sqrdmulh/srshl, clamp to [0, 255], narrow and store
+    "ptrue p4.b\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p2.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x24, %x[n_channels]\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z15.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z14.s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "mov z11.s, #0x0\n"
+    "mov z10.s, #0x0\n"
+    "mov z9.s, #0x0\n"
+    "mov z8.s, #0x0\n"
+    "mov z7.s, #0x0\n"
+    "mov z6.s, #0x0\n"
+    "mov z5.s, #0x0\n"
+    "mov z4.s, #0x0\n"
+    "mov z3.s, #0x0\n"
+    "mov z2.s, #0x0\n"
+    "mov z1.s, #0x0\n"
+    "mov z0.s, #0x0\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x4508abb0  // ushllb z16.h, z29.b, #0x0\n"
+    ".inst 0x4590496b  // uaddwb z11.s, z11.s, z16.h\n"
+    ".inst 0x45904d4a  // uaddwt z10.s, z10.s, z16.h\n"
+    ".inst 0x4508afb0  // ushllt z16.h, z29.b, #0x0\n"
+    ".inst 0x45904929  // uaddwb z9.s, z9.s, z16.h\n"
+    ".inst 0x45904d08  // uaddwt z8.s, z8.s, z16.h\n"
+    ".inst 0x4508ab70  // ushllb z16.h, z27.b, #0x0\n"
+    ".inst 0x459048e7  // uaddwb z7.s, z7.s, z16.h\n"
+    ".inst 0x45904cc6  // uaddwt z6.s, z6.s, z16.h\n"
+    ".inst 0x4508af70  // ushllt z16.h, z27.b, #0x0\n"
+    ".inst 0x459048a5  // uaddwb z5.s, z5.s, z16.h\n"
+    ".inst 0x45904c84  // uaddwt z4.s, z4.s, z16.h\n"
+    ".inst 0x4508ab30  // ushllb z16.h, z25.b, #0x0\n"
+    ".inst 0x45904863  // uaddwb z3.s, z3.s, z16.h\n"
+    ".inst 0x45904c42  // uaddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508af30  // ushllt z16.h, z25.b, #0x0\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "mov z20.s, #0x0\n"
+    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
+    "mov z19.s, #0xff\n"
+    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x04b1756b  // sqrdmulh z11.s, z11.s, z17.s\n"
+    ".inst 0x04b1754a  // sqrdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x04b17529  // sqrdmulh z9.s, z9.s, z17.s\n"
+    ".inst 0x04b17508  // sqrdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x04b174e7  // sqrdmulh z7.s, z7.s, z17.s\n"
+    ".inst 0x04b174c6  // sqrdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x04b174a5  // sqrdmulh z5.s, z5.s, z17.s\n"
+    ".inst 0x04b17484  // sqrdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x04b17463  // sqrdmulh z3.s, z3.s, z17.s\n"
+    ".inst 0x04b17442  // sqrdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x04b17421  // sqrdmulh z1.s, z1.s, z17.s\n"
+    ".inst 0x04b17400  // sqrdmulh z0.s, z0.s, z17.s\n"
+    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
+    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
+    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
+    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
+    ".inst 0x4482920b  // srshl z11.s, p4/M, z11.s, z16.s\n"
+    ".inst 0x4482920a  // srshl z10.s, p4/M, z10.s, z16.s\n"
+    ".inst 0x44829209  // srshl z9.s, p4/M, z9.s, z16.s\n"
+    ".inst 0x44829208  // srshl z8.s, p4/M, z8.s, z16.s\n"
+    ".inst 0x44829207  // srshl z7.s, p4/M, z7.s, z16.s\n"
+    ".inst 0x44829206  // srshl z6.s, p4/M, z6.s, z16.s\n"
+    ".inst 0x44829205  // srshl z5.s, p4/M, z5.s, z16.s\n"
+    ".inst 0x44829204  // srshl z4.s, p4/M, z4.s, z16.s\n"
+    ".inst 0x44829203  // srshl z3.s, p4/M, z3.s, z16.s\n"
+    ".inst 0x44829202  // srshl z2.s, p4/M, z2.s, z16.s\n"
+    ".inst 0x44829201  // srshl z1.s, p4/M, z1.s, z16.s\n"
+    ".inst 0x44829200  // srshl z0.s, p4/M, z0.s, z16.s\n"
+    "smax z15.s, p4/M, z15.s, z20.s\n"
+    "smax z14.s, p4/M, z14.s, z20.s\n"
+    "smax z13.s, p4/M, z13.s, z20.s\n"
+    "smax z12.s, p4/M, z12.s, z20.s\n"
+    "smin z15.s, p4/M, z15.s, z19.s\n"
+    "smin z14.s, p4/M, z14.s, z19.s\n"
+    "smin z13.s, p4/M, z13.s, z19.s\n"
+    "smin z12.s, p4/M, z12.s, z19.s\n"
+    "smax z11.s, p4/M, z11.s, z20.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "smax z10.s, p4/M, z10.s, z20.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smin z11.s, p4/M, z11.s, z19.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "smin z10.s, p4/M, z10.s, z19.s\n"
+    "incb x26, ALL, MUL #4\n"
+    "smax z9.s, p4/M, z9.s, z20.s\n"
+    "smax z8.s, p4/M, z8.s, z20.s\n"
+    "smax z7.s, p4/M, z7.s, z20.s\n"
+    "smax z6.s, p4/M, z6.s, z20.s\n"
+    "trn1 z18.h, z11.h, z10.h\n"
+    "smin z9.s, p4/M, z9.s, z19.s\n"
+    "smin z8.s, p4/M, z8.s, z19.s\n"
+    "smin z7.s, p4/M, z7.s, z19.s\n"
+    "smin z6.s, p4/M, z6.s, z19.s\n"
+    "smax z5.s, p4/M, z5.s, z20.s\n"
+    "trn1 z16.h, z9.h, z8.h\n"
+    "smax z4.s, p4/M, z4.s, z20.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "trn1 z16.b, z18.b, z16.b\n"
+    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+    "smin z5.s, p4/M, z5.s, z19.s\n"
+    "incb x25, ALL, MUL #4\n"
+    "smin z4.s, p4/M, z4.s, z19.s\n"
+    "smax z3.s, p4/M, z3.s, z20.s\n"
+    "smax z2.s, p4/M, z2.s, z20.s\n"
+    "smax z1.s, p4/M, z1.s, z20.s\n"
+    "smax z0.s, p4/M, z0.s, z20.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "smin z3.s, p4/M, z3.s, z19.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+    "smin z2.s, p4/M, z2.s, z19.s\n"
+    "incb x24, ALL, MUL #4\n"
+    "smin z1.s, p4/M, z1.s, z19.s\n"
+    "smin z0.s, p4/M, z0.s, z19.s\n"
+    "trn1 z17.h, z3.h, z2.h\n"
+    "trn1 z16.h, z1.h, z0.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z15.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z14.s, #0x0\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z13.s, #0x0\n"
+    "mov z12.s, #0x0\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
+    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
+    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
+    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "mov z20.s, #0x0\n"
+    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
+    "mov z19.s, #0xff\n"
+    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
+    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
+    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
+    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
+    "smax z15.s, p4/M, z15.s, z20.s\n"
+    "smax z14.s, p4/M, z14.s, z20.s\n"
+    "smax z13.s, p4/M, z13.s, z20.s\n"
+    "smax z12.s, p4/M, z12.s, z20.s\n"
+    "smin z15.s, p4/M, z15.s, z19.s\n"
+    "smin z14.s, p4/M, z14.s, z19.s\n"
+    "smin z13.s, p4/M, z13.s, z19.s\n"
+    "smin z12.s, p4/M, z12.s, z19.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
new file mode 100644
index 0000000..06df151
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+// Kernel descriptor for uint8 NHWC max pooling with a 2x2 window, stride 1, producing a 2x2 output tile (SVE).
+struct sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst
+{
+  typedef uint8_t operand_type;  // element type read from the input tensor
+  typedef uint8_t return_type;  // element type written to the output tensor
+  // Entry-point signature: (n_channels, inptrs, outptrs, exclude_padding, pad_left, pad_top, pad_right, pad_bottom).
+  typedef void (*kern_type)(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  // Identifies this kernel to the framework as a max-pooling variant.
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  // Pooling window dimensions (rows x cols).
+  constexpr static unsigned int pool_rows(void) { return 2; }
+  constexpr static unsigned int pool_cols(void) { return 2; }
+  // Window stride in each dimension.
+  constexpr static unsigned int stride_rows(void) { return 1; }
+  constexpr static unsigned int stride_cols(void) { return 1; }
+  // Output tile dimensions produced per kernel invocation.
+  constexpr static unsigned int out_rows(void) { return 2; }
+  constexpr static unsigned int out_cols(void) { return 2; }
+  // Function pointer invoked by the framework; defaults to the implementation declared above.
+  kern_type kernel = sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
+  // CPUInfo is accepted for interface uniformity but unused: no CPU-specific configuration here.
+  sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
new file mode 100644
index 0000000..22e95a6
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+// SVE 2x2 max pooling, stride 1, uint8_t NHWC, producing a 2x2 output
+// tile per call.  `inptrs` supplies nine row/column pointers (the 3x3
+// grid of input positions covered by the four overlapping 2x2 windows)
+// and `outptrs` the four output positions; one full SVE vector of
+// channels is processed per loop iteration.
+//
+// The exclude_padding/pad_* arguments are accepted for interface
+// uniformity but are not consumed here; presumably the caller arranges
+// `inptrs` so padded positions reference valid data (safe for max
+// pooling) - NOTE(review): confirm against the caller.
+void sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
+  const unsigned int n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *const *const outptrs,
+  const bool exclude_padding,
+  const unsigned int pad_left,
+  const unsigned int pad_top,
+  const unsigned int pad_right,
+  const unsigned int pad_bottom
+)
+{
+  // Repack the arguments into the fixed layout the assembly reads via
+  // offsetof(); only the channel count and the two pointer arrays are
+  // actually stored (the trailing constructor parameters are ignored).
+  struct KernelArgs
+  {
+    const uint64_t n_channels;
+    const uint8_t *const *const inptrs;
+    uint8_t *const *const outptrs;
+    KernelArgs(
+      unsigned int channels,
+      const uint8_t *const *input_ptrs,
+      uint8_t *const * output_ptrs,
+      bool, unsigned int, unsigned int, unsigned int, unsigned int
+    ) : n_channels(channels),
+        inptrs(input_ptrs),
+        outptrs(output_ptrs)
+    {
+    }
+  };
+
+  const KernelArgs args(n_channels, inptrs, outptrs, exclude_padding,
+                        pad_left, pad_top, pad_right, pad_bottom);
+
+  __asm__ __volatile__(
+    // p2 is an all-true governing predicate for the umax instructions;
+    // p1 guards input loads and p0 guards output stores.  x13 and x12
+    // hold the input and output channel offsets respectively.
+    "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+    "ptrue p2.b\n"
+    "ldr x19, [%x[args], %[offsetof_outptrs]]\n"
+    "mov x13, #0x0\n"
+    "mov x12, #0x0\n"
+    "ldp x11, x10, [x19, #0x0]\n"
+    "whilelt p1.b, x13, x14\n"
+    "ldp x9, x28, [x19, #0x10]\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x27, x26, [x19, #0x0]\n"
+    "ldp x25, x24, [x19, #0x10]\n"
+    "ldp x23, x22, [x19, #0x20]\n"
+    "ldp x21, x20, [x19, #0x30]\n"
+    "ldr x19, [x19, #0x40]\n"
+    // Preload one vector of channels from each of the nine input positions.
+    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
+    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
+    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
+    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
+    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
+    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
+    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
+    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
+    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "incw x13\n"
+    "whilelt p1.b, x13, x14\n"
+    "b.none 2f\n"
+    // Main loop: reduce the 3x3 neighbourhood into the four 2x2 window
+    // maxima (z19, z18, z17, z16) while preloading the next channel block.
+    "1:"  // Vector: Loop
+    "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z30.b\n"
+    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
+    "whilelt p0.b, x12, x14\n"
+    "movprfx z21, z30\n umax z21.b, p2/M, z21.b, z29.b\n"
+    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
+    "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z27.b\n"
+    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
+    "movprfx z17, z26\n umax z17.b, p2/M, z17.b, z25.b\n"
+    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
+    "movprfx z16, z24\n umax z16.b, p2/M, z16.b, z28.b\n"
+    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
+    "movprfx z20, z26\n umax z20.b, p2/M, z20.b, z23.b\n"
+    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
+    "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
+    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
+    "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z17.b\n"
+    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
+    "movprfx z17, z21\n umax z17.b, p2/M, z17.b, z16.b\n"
+    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "incw x13\n"
+    "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
+    "st1b { z19.b }, p0, [x11, x12]\n"
+    "whilelt p1.b, x13, x14\n"
+    "st1b { z18.b }, p0, [x10, x12]\n"
+    "st1b { z17.b }, p0, [x9, x12]\n"
+    "st1b { z16.b }, p0, [x28, x12]\n"
+    "incw x12\n"
+    "b.any 1b\n"
+    // Tail: same reduction for the final (possibly partial) channel block,
+    // with no further preloads.
+    "2:"  // Vector: Tail
+    "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z30.b\n"
+    "whilelt p0.b, x12, x14\n"
+    "movprfx z21, z30\n umax z21.b, p2/M, z21.b, z29.b\n"
+    "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z27.b\n"
+    "movprfx z17, z26\n umax z17.b, p2/M, z17.b, z25.b\n"
+    "movprfx z16, z24\n umax z16.b, p2/M, z16.b, z28.b\n"
+    "movprfx z20, z26\n umax z20.b, p2/M, z20.b, z23.b\n"
+    "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
+    "st1b { z19.b }, p0, [x11, x12]\n"
+    "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z17.b\n"
+    "movprfx z17, z21\n umax z17.b, p2/M, z17.b, z16.b\n"
+    "st1b { z18.b }, p0, [x10, x12]\n"
+    "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
+    "st1b { z17.b }, p0, [x9, x12]\n"
+    "st1b { z16.b }, p0, [x28, x12]\n"
+    :
+    : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
+    : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..80d3599
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+// Kernel entry point; implemented in hand-written SVE assembly in the
+// accompanying generic.cpp.  The first (unnamed) argument is the window
+// cell count, which max pooling does not need.
+void sve_u8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+// Descriptor for the generic SVE uint8_t NHWC max-pooling kernel.  Unlike
+// the fixed-geometry specialisations, this kernel handles any window
+// size/stride, so no static geometry accessors are exposed.
+struct sve_u8_nhwc_max_generic_depthfirst
+{
+  typedef uint8_t operand_type;  // input element type
+  typedef uint8_t return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = sve_u8_nhwc_max_generic_depthfirst_impl;
+
+  // CPUInfo is unused: this kernel has no CPU-specific tuning parameters.
+  sve_u8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..7990a3d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE)
+
+namespace arm_conv {
+namespace pooling {
+
+
+// Generic SVE uint8_t max-pooling kernel: reduces `n_valid_cells` input
+// pointers into a single output row of `n_channels` channels.  The main
+// path processes four SVE vectors of channels at a time (consuming the
+// input cells four at a time, with a scalar loop for the remainder); a
+// single-vector loop then handles any leftover channels.  Accumulators
+// are initialised to zero, the identity for unsigned max.  The first
+// (unnamed) argument is the window cell count, unused for max pooling.
+void sve_u8_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr
+)
+{
+  __asm__ __volatile__(
+    // x28/x27/x26/x25: byte offsets of four consecutive SVE vectors of
+    // channels; p3..p0 are the matching active-lane predicates and p4 is
+    // the all-true governing predicate for the umax instructions.
+    "ptrue p4.b\n"
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p2.b, x27, %x[n_channels]\n"
+    "whilelt p1.b, x26, %x[n_channels]\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "mov z7.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z6.b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z5.b, #0x0\n"
+    "mov z4.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+    "add x19, x19, #0x20\n"
+    "umax z22.b, p4/M, z22.b, z29.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+    "umax z21.b, p4/M, z21.b, z26.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "umax z16.b, p4/M, z16.b, z25.b\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "umax z20.b, p4/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "umax z18.b, p4/M, z18.b, z22.b\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "umax z17.b, p4/M, z17.b, z21.b\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "umax z16.b, p4/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "umax z7.b, p4/M, z7.b, z19.b\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "umax z6.b, p4/M, z6.b, z18.b\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "umax z5.b, p4/M, z5.b, z17.b\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "umax z4.b, p4/M, z4.b, z16.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+    "umax z22.b, p4/M, z22.b, z29.b\n"
+    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+    "umax z21.b, p4/M, z21.b, z26.b\n"
+    "umax z16.b, p4/M, z16.b, z25.b\n"
+    "umax z20.b, p4/M, z20.b, z24.b\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "umax z18.b, p4/M, z18.b, z22.b\n"
+    "umax z17.b, p4/M, z17.b, z21.b\n"
+    "umax z16.b, p4/M, z16.b, z20.b\n"
+    "umax z7.b, p4/M, z7.b, z19.b\n"
+    "umax z6.b, p4/M, z6.b, z18.b\n"
+    "umax z5.b, p4/M, z5.b, z17.b\n"
+    "umax z4.b, p4/M, z4.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "umax z7.b, p4/M, z7.b, z3.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "umax z6.b, p4/M, z6.b, z31.b\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "umax z5.b, p4/M, z5.b, z28.b\n"
+    "umax z4.b, p4/M, z4.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "incb x28, ALL, MUL #4\n"
+    "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+    "incb x27, ALL, MUL #4\n"
+    "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+    "incb x26, ALL, MUL #4\n"
+    "st1b { z4.b }, p0, [%x[outptr], x25]\n"
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    // Remainder path: fewer than four vectors of channels left; same
+    // reduction structure operating on a single vector at a time.
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z7.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "umax z7.b, p4/M, z7.b, z19.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "umax z7.b, p4/M, z7.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "umax z7.b, p4/M, z7.b, z3.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp
new file mode 100644
index 0000000..098896d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+// Kernel entry point; implemented in the accompanying generic.cpp.
+// `qp` carries the quantization parameters (offsets, multipliers, shifts)
+// used to requantize the accumulated average.
+void sve_u8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+// Descriptor for the generic SVE2 quantized-uint8_t NHWC average-pooling
+// kernel.  Handles any window size, so no static geometry accessors are
+// exposed.
+struct sve_u8q_nhwc_avg_generic_depthfirst
+{
+  typedef uint8_t operand_type;  // input element type
+  typedef uint8_t return_type;   // output element type
+
+  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+
+  kern_type kernel = sve_u8q_nhwc_avg_generic_depthfirst_impl;
+
+  // CPUInfo is unused: this kernel has no CPU-specific tuning parameters.
+  sve_u8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..368577c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstdint>
+#include <cstring>
+#include <cmath>
+
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace {
+  // Fixed-point reciprocal of the pooling window size used to rescale an
+  // accumulated sum into an average: the represented value is
+  // (multiplier / 2^31) * 2^shift.
+  struct RescaleParams
+  {
+    int32_t multiplier, shift;
+  };
+
+  // Precomputed reciprocals 1/n for window sizes n = 2..9 (indexed by
+  // n - 2); other window sizes fall back to a run-time computation.
+  constexpr RescaleParams rescale_params[8] = {
+    {0x40000000, -0},  // 1/2
+    {0x55555555, -1},  // 1/3
+    {0x40000000, -1},  // 1/4
+    {0x66666666, -2},  // 1/5
+    {0x55555555, -2},  // 1/6
+    {0x49249249, -2},  // 1/7
+    {0x40000000, -2},  // 1/8
+    {0x71c71c72, -3},  // 1/9
+  };
+}
+
+void sve_u8q_nhwc_avg_generic_depthfirst_impl(
+  const uint64_t window_cells,
+  const uint64_t n_valid_cells,
+  uint64_t n_channels,
+  const uint8_t *const *const inptrs,
+  uint8_t *outptr,
+  const Requantize32 &qp
+)
+{
+  if (n_valid_cells == 1 && window_cells == 1)
+  {
+    // In this case, simply copy from the input to the output
+    std::memcpy(outptr, *inptrs, n_channels);
+    return;
+  }
+
+  // Compute (or look up) the rescale values
+  int32_t shift_value = 0, rescale_value = 0;
+  if (2 <= window_cells && window_cells <= 9)
+  {
+    auto &params = rescale_params[window_cells - 2];
+    rescale_value = params.multiplier;
+    shift_value = params.shift;
+  }
+  else
+  {
+    auto f_rescale_value = 1.0f / static_cast<float>(window_cells);
+
+    shift_value = 0;
+    while (f_rescale_value < 0.5f)
+    {
+      shift_value--;
+      f_rescale_value *= 2.0f;
+    }
+
+    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    {
+      shift_value++;
+      rescale_value >>= 1;
+    }
+  }
+
+
+  // Initialise the accumulators such that the offsets are subtracted for all
+  // valid inputs.
+  const int32_t accumulator_init = -qp.input_offset * n_valid_cells;
+
+  // Combine together the rescale value for the requantization and the scaling
+  // factor for the average pool.
+  const int32_t shift = qp.per_layer_left_shift - qp.per_layer_right_shift + shift_value;
+  const int32_t left_shift = shift > 0 ? shift : 0;
+  const int32_t right_shift = shift <= 0 ? shift : 0;
+
+  int32_t combined_rescale_value = 0;
+  __asm__ __volatile__ (
+      "mov v16.s[0], %w[per_layer_mul]\n"
+      "mov v17.s[0], %w[rescale_value]\n"
+      "sqrdmulh s18, s16, s17\n"
+      "mov %w[combined_rescale_value], v18.s[0]\n"
+    : [combined_rescale_value] "=r" (combined_rescale_value)
+    : [per_layer_mul] "r" (qp.per_layer_mul), [rescale_value] "r" (rescale_value)
+    : "q16", "q17", "q18"
+  );
+
+  __asm__ __volatile__(
+    "ptrue p4.b\n"
+    "mov x26, #0x0\n"
+    "cntb x25\n"
+    "cntb x24, ALL, MUL #2\n"
+    "cntb x23, ALL, MUL #3\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p2.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x24, %x[n_channels]\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.none 7f\n"
+    "1:"  // 4-vectors of channels
+    "ld1rw { z15.s }, p4/Z, [%x[accumulator_init]]\n"
+    "mov z14.d, z15.d\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.d, z15.d\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z12.d, z15.d\n"
+    "mov z11.d, z15.d\n"
+    "mov z10.d, z15.d\n"
+    "mov z9.d, z15.d\n"
+    "mov z8.d, z15.d\n"
+    "mov z7.d, z15.d\n"
+    "mov z6.d, z15.d\n"
+    "mov z5.d, z15.d\n"
+    "mov z4.d, z15.d\n"
+    "mov z3.d, z15.d\n"
+    "mov z2.d, z15.d\n"
+    "mov z1.d, z15.d\n"
+    "mov z0.d, z15.d\n"
+    "cbz x22, 4f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
+    ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
+    ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
+    ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
+    ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
+    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
+    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
+    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
+    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
+    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x4508abb0  // ushllb z16.h, z29.b, #0x0\n"
+    ".inst 0x4590496b  // uaddwb z11.s, z11.s, z16.h\n"
+    ".inst 0x45904d4a  // uaddwt z10.s, z10.s, z16.h\n"
+    ".inst 0x4508afb0  // ushllt z16.h, z29.b, #0x0\n"
+    ".inst 0x45904929  // uaddwb z9.s, z9.s, z16.h\n"
+    ".inst 0x45904d08  // uaddwt z8.s, z8.s, z16.h\n"
+    ".inst 0x4508ab70  // ushllb z16.h, z27.b, #0x0\n"
+    ".inst 0x459048e7  // uaddwb z7.s, z7.s, z16.h\n"
+    ".inst 0x45904cc6  // uaddwt z6.s, z6.s, z16.h\n"
+    ".inst 0x4508af70  // ushllt z16.h, z27.b, #0x0\n"
+    ".inst 0x459048a5  // uaddwb z5.s, z5.s, z16.h\n"
+    ".inst 0x45904c84  // uaddwt z4.s, z4.s, z16.h\n"
+    ".inst 0x4508ab30  // ushllb z16.h, z25.b, #0x0\n"
+    ".inst 0x45904863  // uaddwb z3.s, z3.s, z16.h\n"
+    ".inst 0x45904c42  // uaddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508af30  // ushllt z16.h, z25.b, #0x0\n"
+    ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
+    ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "mov z21.s, #0x0\n"
+    "ld1rw { z20.s }, p4/Z, [%x[combined_rescale_value]]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "mov z19.s, #0xff\n"
+    "ld1rw { z18.s }, p4/Z, [%x[left_shift]]\n"
+    "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
+    ".inst 0x4482924f  // srshl z15.s, p4/M, z15.s, z18.s\n"
+    "ld1rw { z16.s }, p4/Z, [x19]\n"
+    ".inst 0x4482924e  // srshl z14.s, p4/M, z14.s, z18.s\n"
+    ".inst 0x4482924d  // srshl z13.s, p4/M, z13.s, z18.s\n"
+    ".inst 0x4482924c  // srshl z12.s, p4/M, z12.s, z18.s\n"
+    ".inst 0x4482924b  // srshl z11.s, p4/M, z11.s, z18.s\n"
+    ".inst 0x04b475ef  // sqrdmulh z15.s, z15.s, z20.s\n"
+    ".inst 0x04b475ce  // sqrdmulh z14.s, z14.s, z20.s\n"
+    ".inst 0x04b475ad  // sqrdmulh z13.s, z13.s, z20.s\n"
+    ".inst 0x04b4758c  // sqrdmulh z12.s, z12.s, z20.s\n"
+    ".inst 0x04b4756b  // sqrdmulh z11.s, z11.s, z20.s\n"
+    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
+    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
+    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
+    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
+    "add z15.s, z15.s, z16.s\n"
+    "add z14.s, z14.s, z16.s\n"
+    "add z13.s, z13.s, z16.s\n"
+    "add z12.s, z12.s, z16.s\n"
+    ".inst 0x4482922b  // srshl z11.s, p4/M, z11.s, z17.s\n"
+    ".inst 0x4482924a  // srshl z10.s, p4/M, z10.s, z18.s\n"
+    ".inst 0x44829249  // srshl z9.s, p4/M, z9.s, z18.s\n"
+    ".inst 0x44829248  // srshl z8.s, p4/M, z8.s, z18.s\n"
+    "add z11.s, z11.s, z16.s\n"
+    ".inst 0x04b4754a  // sqrdmulh z10.s, z10.s, z20.s\n"
+    ".inst 0x04b47529  // sqrdmulh z9.s, z9.s, z20.s\n"
+    ".inst 0x04b47508  // sqrdmulh z8.s, z8.s, z20.s\n"
+    ".inst 0x44829247  // srshl z7.s, p4/M, z7.s, z18.s\n"
+    ".inst 0x4482922a  // srshl z10.s, p4/M, z10.s, z17.s\n"
+    ".inst 0x44829229  // srshl z9.s, p4/M, z9.s, z17.s\n"
+    ".inst 0x44829228  // srshl z8.s, p4/M, z8.s, z17.s\n"
+    ".inst 0x04b474e7  // sqrdmulh z7.s, z7.s, z20.s\n"
+    "add z10.s, z10.s, z16.s\n"
+    "add z9.s, z9.s, z16.s\n"
+    "add z8.s, z8.s, z16.s\n"
+    ".inst 0x44829227  // srshl z7.s, p4/M, z7.s, z17.s\n"
+    ".inst 0x44829246  // srshl z6.s, p4/M, z6.s, z18.s\n"
+    ".inst 0x44829245  // srshl z5.s, p4/M, z5.s, z18.s\n"
+    ".inst 0x44829244  // srshl z4.s, p4/M, z4.s, z18.s\n"
+    "add z7.s, z7.s, z16.s\n"
+    ".inst 0x04b474c6  // sqrdmulh z6.s, z6.s, z20.s\n"
+    ".inst 0x04b474a5  // sqrdmulh z5.s, z5.s, z20.s\n"
+    ".inst 0x04b47484  // sqrdmulh z4.s, z4.s, z20.s\n"
+    ".inst 0x44829243  // srshl z3.s, p4/M, z3.s, z18.s\n"
+    ".inst 0x44829226  // srshl z6.s, p4/M, z6.s, z17.s\n"
+    ".inst 0x44829225  // srshl z5.s, p4/M, z5.s, z17.s\n"
+    ".inst 0x44829224  // srshl z4.s, p4/M, z4.s, z17.s\n"
+    ".inst 0x04b47463  // sqrdmulh z3.s, z3.s, z20.s\n"
+    "add z6.s, z6.s, z16.s\n"
+    "add z5.s, z5.s, z16.s\n"
+    "add z4.s, z4.s, z16.s\n"
+    ".inst 0x44829223  // srshl z3.s, p4/M, z3.s, z17.s\n"
+    ".inst 0x44829242  // srshl z2.s, p4/M, z2.s, z18.s\n"
+    ".inst 0x44829241  // srshl z1.s, p4/M, z1.s, z18.s\n"
+    ".inst 0x44829240  // srshl z0.s, p4/M, z0.s, z18.s\n"
+    "add z3.s, z3.s, z16.s\n"
+    ".inst 0x04b47442  // sqrdmulh z2.s, z2.s, z20.s\n"
+    ".inst 0x04b47421  // sqrdmulh z1.s, z1.s, z20.s\n"
+    ".inst 0x04b47400  // sqrdmulh z0.s, z0.s, z20.s\n"
+    "smax z15.s, p4/M, z15.s, z21.s\n"
+    ".inst 0x44829222  // srshl z2.s, p4/M, z2.s, z17.s\n"
+    ".inst 0x44829221  // srshl z1.s, p4/M, z1.s, z17.s\n"
+    ".inst 0x44829220  // srshl z0.s, p4/M, z0.s, z17.s\n"
+    "smin z15.s, p4/M, z15.s, z19.s\n"
+    "add z2.s, z2.s, z16.s\n"
+    "add z1.s, z1.s, z16.s\n"
+    "add z0.s, z0.s, z16.s\n"
+    "smax z14.s, p4/M, z14.s, z21.s\n"
+    "smax z13.s, p4/M, z13.s, z21.s\n"
+    "smax z12.s, p4/M, z12.s, z21.s\n"
+    "smax z11.s, p4/M, z11.s, z21.s\n"
+    "smin z14.s, p4/M, z14.s, z19.s\n"
+    "smin z13.s, p4/M, z13.s, z19.s\n"
+    "smin z12.s, p4/M, z12.s, z19.s\n"
+    "smin z11.s, p4/M, z11.s, z19.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "smax z10.s, p4/M, z10.s, z21.s\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "smax z9.s, p4/M, z9.s, z21.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "smin z10.s, p4/M, z10.s, z19.s\n"
+    "incb x26, ALL, MUL #4\n"
+    "smin z9.s, p4/M, z9.s, z19.s\n"
+    "smax z8.s, p4/M, z8.s, z21.s\n"
+    "smax z7.s, p4/M, z7.s, z21.s\n"
+    "smax z6.s, p4/M, z6.s, z21.s\n"
+    "trn1 z18.h, z11.h, z10.h\n"
+    "smin z8.s, p4/M, z8.s, z19.s\n"
+    "smin z7.s, p4/M, z7.s, z19.s\n"
+    "smin z6.s, p4/M, z6.s, z19.s\n"
+    "smax z5.s, p4/M, z5.s, z21.s\n"
+    "trn1 z16.h, z9.h, z8.h\n"
+    "smax z4.s, p4/M, z4.s, z21.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "trn1 z16.b, z18.b, z16.b\n"
+    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+    "smin z5.s, p4/M, z5.s, z19.s\n"
+    "incb x25, ALL, MUL #4\n"
+    "smin z4.s, p4/M, z4.s, z19.s\n"
+    "smax z3.s, p4/M, z3.s, z21.s\n"
+    "smax z2.s, p4/M, z2.s, z21.s\n"
+    "smax z1.s, p4/M, z1.s, z21.s\n"
+    "smax z0.s, p4/M, z0.s, z21.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "smin z3.s, p4/M, z3.s, z19.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+    "smin z2.s, p4/M, z2.s, z19.s\n"
+    "incb x24, ALL, MUL #4\n"
+    "smin z1.s, p4/M, z1.s, z19.s\n"
+    "smin z0.s, p4/M, z0.s, z19.s\n"
+    "trn1 z17.h, z3.h, z2.h\n"
+    "trn1 z16.h, z1.h, z0.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "incb x23, ALL, MUL #4\n"
+    "whilelt p0.b, x23, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "ld1rw { z15.s }, p4/Z, [%x[accumulator_init]]\n"
+    "mov z14.d, z15.d\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z13.d, z15.d\n"
+    "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z12.d, z15.d\n"
+    "cbz x22, 11f\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "subs x22, x22, #0x1\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 2 inputs loop
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
+    "add x19, x19, #0x10\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    "subs x22, x22, #0x1\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 2 inputs tail
+    ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
+    ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x1\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x21, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
+    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
+    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
+    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
+    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
+    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
+    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "mov z21.s, #0x0\n"
+    "ld1rw { z20.s }, p4/Z, [%x[combined_rescale_value]]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "mov z19.s, #0xff\n"
+    "ld1rw { z18.s }, p4/Z, [%x[left_shift]]\n"
+    "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
+    ".inst 0x4482924f  // srshl z15.s, p4/M, z15.s, z18.s\n"
+    "ld1rw { z16.s }, p4/Z, [x19]\n"
+    ".inst 0x4482924e  // srshl z14.s, p4/M, z14.s, z18.s\n"
+    ".inst 0x4482924d  // srshl z13.s, p4/M, z13.s, z18.s\n"
+    ".inst 0x4482924c  // srshl z12.s, p4/M, z12.s, z18.s\n"
+    ".inst 0x04b475ef  // sqrdmulh z15.s, z15.s, z20.s\n"
+    ".inst 0x04b475ce  // sqrdmulh z14.s, z14.s, z20.s\n"
+    ".inst 0x04b475ad  // sqrdmulh z13.s, z13.s, z20.s\n"
+    ".inst 0x04b4758c  // sqrdmulh z12.s, z12.s, z20.s\n"
+    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
+    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
+    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
+    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
+    "add z15.s, z15.s, z16.s\n"
+    "add z14.s, z14.s, z16.s\n"
+    "add z13.s, z13.s, z16.s\n"
+    "add z12.s, z12.s, z16.s\n"
+    "smax z15.s, p4/M, z15.s, z21.s\n"
+    "smax z14.s, p4/M, z14.s, z21.s\n"
+    "smax z13.s, p4/M, z13.s, z21.s\n"
+    "smax z12.s, p4/M, z12.s, z21.s\n"
+    "smin z15.s, p4/M, z15.s, z19.s\n"
+    "smin z14.s, p4/M, z14.s, z19.s\n"
+    "smin z13.s, p4/M, z13.s, z19.s\n"
+    "smin z12.s, p4/M, z12.s, z19.s\n"
+    "trn1 z17.h, z15.h, z14.h\n"
+    "trn1 z16.h, z13.h, z12.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "incb x26\n"
+    "whilelt p3.b, x26, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [outptr] "r" (outptr), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp
new file mode 100644
index 0000000..7633db1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+void sve_u8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+struct sve_u8q_nhwc_max_generic_depthfirst  // Strategy descriptor: SVE2 generic max-pooling over quantized uint8, NHWC layout.
+{
+  using operand_type = uint8_t;  // element type read from the input tensor
+  using return_type = uint8_t;   // element type written to the output tensor
+
+  using kern_type = void (*)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
+
+  static constexpr PoolingType pooling_type(void) { return PoolingType::MAX; }
+
+  kern_type kernel = sve_u8q_nhwc_max_generic_depthfirst_impl;  // entry point defined in generic.cpp
+
+  sve_u8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}  // CPUInfo is not consulted by this strategy
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
new file mode 100644
index 0000000..c104088
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pooling.hpp"
+#include <cstdint>
+
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+
+namespace arm_conv {
+namespace pooling {
+
+
+void sve_u8q_nhwc_max_generic_depthfirst_impl(
+  const uint64_t,  // unnamed and unused by this kernel
+  const uint64_t n_valid_cells,  // number of valid input-row pointers in inptrs
+  uint64_t n_channels,  // channels (bytes per row) to pool
+  const uint8_t *const *const inptrs,  // array of n_valid_cells input row pointers
+  uint8_t *outptr,  // output row, n_channels bytes
+  const Requantize32 &qp  // quantization params: input/output offsets, per-layer mul and shifts
+)
+{  // Per-channel max over n_valid_cells rows, then fixed-point requantization back to uint8.
+  __asm__ __volatile__(
+    "ptrue p4.b\n"  // p4: all-true governing predicate
+    "mov x28, #0x0\n"
+    "cntb x27\n"
+    "cntb x26, ALL, MUL #2\n"
+    "cntb x25, ALL, MUL #3\n"  // x28..x25: byte offsets of channel vectors 0..3
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p2.b, x27, %x[n_channels]\n"
+    "whilelt p1.b, x26, %x[n_channels]\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.none 7f\n"  // 4th vector empty: not enough channels for the 4-vector path
+    "1:"  // 4-vectors of channels
+    "mov z10.b, #0x0\n"  // z10/z9/z8/z7: running max accumulators, one per vector
+    "mov x19, %x[inptrs]\n"
+    "mov z9.b, #0x0\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"  // x24 = number of 4-input iterations
+    "mov z8.b, #0x0\n"
+    "mov z7.b, #0x0\n"
+    "cbz x24, 4f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "beq 3f\n"
+    "2:"  // 4-vectors of channels: 4 inputs loop
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+    "add x19, x19, #0x20\n"
+    "umax z22.b, p4/M, z22.b, z29.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+    "umax z21.b, p4/M, z21.b, z26.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "umax z16.b, p4/M, z16.b, z25.b\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "umax z20.b, p4/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "umax z18.b, p4/M, z18.b, z22.b\n"
+    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
+    "umax z17.b, p4/M, z17.b, z21.b\n"
+    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
+    "umax z16.b, p4/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
+    "umax z10.b, p4/M, z10.b, z19.b\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "umax z9.b, p4/M, z9.b, z18.b\n"
+    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
+    "umax z8.b, p4/M, z8.b, z17.b\n"
+    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
+    "umax z7.b, p4/M, z7.b, z16.b\n"
+    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "bgt 2b\n"
+    "3:"  // 4-vectors of channels: 4 inputs tail
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+    "umax z22.b, p4/M, z22.b, z29.b\n"
+    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+    "umax z21.b, p4/M, z21.b, z26.b\n"
+    "umax z16.b, p4/M, z16.b, z25.b\n"
+    "umax z20.b, p4/M, z20.b, z24.b\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "umax z18.b, p4/M, z18.b, z22.b\n"
+    "umax z17.b, p4/M, z17.b, z21.b\n"
+    "umax z16.b, p4/M, z16.b, z20.b\n"
+    "umax z10.b, p4/M, z10.b, z19.b\n"
+    "umax z9.b, p4/M, z9.b, z18.b\n"
+    "umax z8.b, p4/M, z8.b, z17.b\n"
+    "umax z7.b, p4/M, z7.b, z16.b\n"
+    "4:"  // 4-vectors of channels: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"  // remaining inputs handled one at a time
+    "beq 6f\n"
+    "5:"  // 4-vectors of channels: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "umax z10.b, p4/M, z10.b, z3.b\n"
+    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
+    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
+    "umax z9.b, p4/M, z9.b, z31.b\n"
+    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
+    "umax z8.b, p4/M, z8.b, z28.b\n"
+    "umax z7.b, p4/M, z7.b, z16.b\n"
+    "bgt 5b\n"
+    "6:"  // 4-vectors of channels: Single input loop: End
+    "mov z6.s, #0x0\n"  // z6: lower clamp bound (0)
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1rw { z5.s }, p4/Z, [x19]\n"
+    "mov z4.s, #0xff\n"  // z4: upper clamp bound (255)
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    ".inst 0x4508a951  // ushllb z17.h, z10.b, #0x0\n"
+    "ld1rw { z3.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    ".inst 0x4508ad50  // ushllt z16.h, z10.b, #0x0\n"
+    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    ".inst 0x4508a937  // ushllb z23.h, z9.b, #0x0\n"
+    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    ".inst 0x4508ad36  // ushllt z22.h, z9.b, #0x0\n"
+    "ld1rw { z0.s }, p4/Z, [x19]\n"
+    ".inst 0x4508a912  // ushllb z18.h, z8.b, #0x0\n"
+    ".inst 0x4508ad15  // ushllt z21.h, z8.b, #0x0\n"
+    ".inst 0x4508a8f4  // ushllb z20.h, z7.b, #0x0\n"
+    ".inst 0x4508acf3  // ushllt z19.h, z7.b, #0x0\n"
+    "neg z5.s, p4/M, z5.s\n"  // z5 = -input_offset
+    ".inst 0x459140bf  // saddwb z31.s, z5.s, z17.h\n"  // widen maxima to 32 bits, subtracting input_offset
+    ".inst 0x459144b1  // saddwt z17.s, z5.s, z17.h\n"
+    ".inst 0x459040be  // saddwb z30.s, z5.s, z16.h\n"
+    ".inst 0x459044b0  // saddwt z16.s, z5.s, z16.h\n"
+    ".inst 0x459740bd  // saddwb z29.s, z5.s, z23.h\n"
+    ".inst 0x459744bc  // saddwt z28.s, z5.s, z23.h\n"
+    ".inst 0x459640bb  // saddwb z27.s, z5.s, z22.h\n"
+    ".inst 0x459644ba  // saddwt z26.s, z5.s, z22.h\n"
+    ".inst 0x459240b9  // saddwb z25.s, z5.s, z18.h\n"
+    ".inst 0x459244b2  // saddwt z18.s, z5.s, z18.h\n"
+    ".inst 0x459540b8  // saddwb z24.s, z5.s, z21.h\n"
+    ".inst 0x459544b7  // saddwt z23.s, z5.s, z21.h\n"
+    ".inst 0x459440b6  // saddwb z22.s, z5.s, z20.h\n"
+    ".inst 0x459444b5  // saddwt z21.s, z5.s, z20.h\n"
+    ".inst 0x459340b4  // saddwb z20.s, z5.s, z19.h\n"
+    ".inst 0x459344b3  // saddwt z19.s, z5.s, z19.h\n"
+    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"  // requantize: <<left_shift, sqrdmulh(mul), >>right_shift
+    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
+    ".inst 0x4482905e  // srshl z30.s, p4/M, z30.s, z2.s\n"
+    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
+    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
+    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
+    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
+    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
+    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
+    ".inst 0x4482903e  // srshl z30.s, p4/M, z30.s, z1.s\n"
+    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
+    "add z31.s, z31.s, z0.s\n"  // + output_offset
+    "add z17.s, z17.s, z0.s\n"
+    "add z30.s, z30.s, z0.s\n"
+    "add z16.s, z16.s, z0.s\n"
+    ".inst 0x4482905d  // srshl z29.s, p4/M, z29.s, z2.s\n"
+    ".inst 0x4482905c  // srshl z28.s, p4/M, z28.s, z2.s\n"
+    ".inst 0x4482905b  // srshl z27.s, p4/M, z27.s, z2.s\n"
+    ".inst 0x4482905a  // srshl z26.s, p4/M, z26.s, z2.s\n"
+    ".inst 0x04a377bd  // sqrdmulh z29.s, z29.s, z3.s\n"
+    ".inst 0x04a3779c  // sqrdmulh z28.s, z28.s, z3.s\n"
+    ".inst 0x04a3777b  // sqrdmulh z27.s, z27.s, z3.s\n"
+    ".inst 0x04a3775a  // sqrdmulh z26.s, z26.s, z3.s\n"
+    ".inst 0x4482903d  // srshl z29.s, p4/M, z29.s, z1.s\n"
+    ".inst 0x4482903c  // srshl z28.s, p4/M, z28.s, z1.s\n"
+    ".inst 0x4482903b  // srshl z27.s, p4/M, z27.s, z1.s\n"
+    ".inst 0x4482903a  // srshl z26.s, p4/M, z26.s, z1.s\n"
+    "add z29.s, z29.s, z0.s\n"
+    "add z28.s, z28.s, z0.s\n"
+    "add z27.s, z27.s, z0.s\n"
+    "add z26.s, z26.s, z0.s\n"
+    ".inst 0x44829059  // srshl z25.s, p4/M, z25.s, z2.s\n"
+    ".inst 0x44829052  // srshl z18.s, p4/M, z18.s, z2.s\n"
+    "smax z31.s, p4/M, z31.s, z6.s\n"  // clamp into [0, 255]
+    "smax z17.s, p4/M, z17.s, z6.s\n"
+    ".inst 0x04a37739  // sqrdmulh z25.s, z25.s, z3.s\n"
+    ".inst 0x04a37652  // sqrdmulh z18.s, z18.s, z3.s\n"
+    "smin z31.s, p4/M, z31.s, z4.s\n"
+    "smin z17.s, p4/M, z17.s, z4.s\n"
+    ".inst 0x44829039  // srshl z25.s, p4/M, z25.s, z1.s\n"
+    ".inst 0x44829032  // srshl z18.s, p4/M, z18.s, z1.s\n"
+    "smax z30.s, p4/M, z30.s, z6.s\n"
+    "trn1 z17.h, z31.h, z17.h\n"  // repack 32-bit lanes back to bytes (trn1 .h then .b)
+    "add z25.s, z25.s, z0.s\n"
+    "add z18.s, z18.s, z0.s\n"
+    ".inst 0x44829058  // srshl z24.s, p4/M, z24.s, z2.s\n"
+    ".inst 0x44829057  // srshl z23.s, p4/M, z23.s, z2.s\n"
+    "smin z30.s, p4/M, z30.s, z4.s\n"
+    "smax z16.s, p4/M, z16.s, z6.s\n"
+    ".inst 0x04a37718  // sqrdmulh z24.s, z24.s, z3.s\n"
+    ".inst 0x04a376f7  // sqrdmulh z23.s, z23.s, z3.s\n"
+    "smax z29.s, p4/M, z29.s, z6.s\n"
+    "smin z16.s, p4/M, z16.s, z4.s\n"
+    ".inst 0x44829038  // srshl z24.s, p4/M, z24.s, z1.s\n"
+    ".inst 0x44829037  // srshl z23.s, p4/M, z23.s, z1.s\n"
+    "smin z29.s, p4/M, z29.s, z4.s\n"
+    "trn1 z16.h, z30.h, z16.h\n"
+    "add z24.s, z24.s, z0.s\n"
+    "add z23.s, z23.s, z0.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x28]\n"  // store channel vector 0
+    ".inst 0x44829056  // srshl z22.s, p4/M, z22.s, z2.s\n"
+    "incb x28, ALL, MUL #4\n"
+    ".inst 0x44829055  // srshl z21.s, p4/M, z21.s, z2.s\n"
+    ".inst 0x44829054  // srshl z20.s, p4/M, z20.s, z2.s\n"
+    ".inst 0x44829053  // srshl z19.s, p4/M, z19.s, z2.s\n"
+    "smax z28.s, p4/M, z28.s, z6.s\n"
+    ".inst 0x04a376d6  // sqrdmulh z22.s, z22.s, z3.s\n"
+    ".inst 0x04a376b5  // sqrdmulh z21.s, z21.s, z3.s\n"
+    ".inst 0x04a37694  // sqrdmulh z20.s, z20.s, z3.s\n"
+    ".inst 0x04a37673  // sqrdmulh z19.s, z19.s, z3.s\n"
+    ".inst 0x44829036  // srshl z22.s, p4/M, z22.s, z1.s\n"
+    ".inst 0x44829035  // srshl z21.s, p4/M, z21.s, z1.s\n"
+    ".inst 0x44829034  // srshl z20.s, p4/M, z20.s, z1.s\n"
+    ".inst 0x44829033  // srshl z19.s, p4/M, z19.s, z1.s\n"
+    "add z22.s, z22.s, z0.s\n"
+    "add z21.s, z21.s, z0.s\n"
+    "add z20.s, z20.s, z0.s\n"
+    "add z19.s, z19.s, z0.s\n"
+    "smax z27.s, p4/M, z27.s, z6.s\n"
+    "smax z26.s, p4/M, z26.s, z6.s\n"
+    "smax z25.s, p4/M, z25.s, z6.s\n"
+    "smin z28.s, p4/M, z28.s, z4.s\n"
+    "smin z27.s, p4/M, z27.s, z4.s\n"
+    "smin z26.s, p4/M, z26.s, z4.s\n"
+    "smin z25.s, p4/M, z25.s, z4.s\n"
+    "trn1 z17.h, z29.h, z28.h\n"
+    "smax z18.s, p4/M, z18.s, z6.s\n"
+    "trn1 z16.h, z27.h, z26.h\n"
+    "smax z24.s, p4/M, z24.s, z6.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p2, [%x[outptr], x27]\n"  // store channel vector 1
+    "smin z18.s, p4/M, z18.s, z4.s\n"
+    "incb x27, ALL, MUL #4\n"
+    "smin z24.s, p4/M, z24.s, z4.s\n"
+    "smax z23.s, p4/M, z23.s, z6.s\n"
+    "smax z22.s, p4/M, z22.s, z6.s\n"
+    "smax z21.s, p4/M, z21.s, z6.s\n"
+    "trn1 z18.h, z25.h, z18.h\n"
+    "smin z23.s, p4/M, z23.s, z4.s\n"
+    "smin z22.s, p4/M, z22.s, z4.s\n"
+    "smin z21.s, p4/M, z21.s, z4.s\n"
+    "smax z20.s, p4/M, z20.s, z6.s\n"
+    "trn1 z16.h, z24.h, z23.h\n"
+    "smax z19.s, p4/M, z19.s, z6.s\n"
+    "trn1 z17.h, z22.h, z21.h\n"
+    "trn1 z16.b, z18.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x26]\n"  // store channel vector 2
+    "smin z20.s, p4/M, z20.s, z4.s\n"
+    "incb x26, ALL, MUL #4\n"
+    "smin z19.s, p4/M, z19.s, z4.s\n"
+    "trn1 z16.h, z20.h, z19.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p0, [%x[outptr], x25]\n"  // store channel vector 3
+    "incb x25, ALL, MUL #4\n"
+    "whilelt p0.b, x25, %x[n_channels]\n"
+    "b.any 1b\n"
+    "7:"  // Single vector of channels
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.none 14f\n"
+    "8:"  // Single vector of channels: Loop
+    "mov z10.b, #0x0\n"  // z10: running max accumulator for this vector
+    "mov x19, %x[inptrs]\n"
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "cbz x24, 11f\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "beq 10f\n"
+    "9:"  // Single vector of channels: Loop: 4 inputs loop
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "ldp x23, x22, [x19, #0x0]\n"
+    "subs x24, x24, #0x1\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "ldp x21, x20, [x19, #0x10]\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "umax z10.b, p4/M, z10.b, z19.b\n"
+    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "bgt 9b\n"
+    "10:"  // Single vector of channels: Loop: 4 inputs tail
+    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
+    "umax z19.b, p4/M, z19.b, z23.b\n"
+    "umax z10.b, p4/M, z10.b, z19.b\n"
+    "11:"  // Single vector of channels: Loop: After loop
+    "ands x20, %x[n_valid_cells], #0x3\n"
+    "beq 13f\n"
+    "12:"  // Single vector of channels: Loop: Single input loop
+    "ldr x23, [x19], #0x8\n"
+    "subs x20, x20, #0x1\n"
+    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
+    "umax z10.b, p4/M, z10.b, z3.b\n"
+    "bgt 12b\n"
+    "13:"  // Single vector of channels: Loop: Single input loop: End
+    "mov z6.s, #0x0\n"  // z6/z4: output clamp bounds [0, 255]
+    "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+    "ld1rw { z5.s }, p4/Z, [x19]\n"
+    "mov z4.s, #0xff\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    ".inst 0x4508a951  // ushllb z17.h, z10.b, #0x0\n"
+    "ld1rw { z3.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    ".inst 0x4508ad50  // ushllt z16.h, z10.b, #0x0\n"
+    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "neg z5.s, p4/M, z5.s\n"  // z5 = -input_offset
+    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    ".inst 0x459140bf  // saddwb z31.s, z5.s, z17.h\n"
+    "ld1rw { z0.s }, p4/Z, [x19]\n"
+    ".inst 0x459144b1  // saddwt z17.s, z5.s, z17.h\n"
+    ".inst 0x459040be  // saddwb z30.s, z5.s, z16.h\n"
+    ".inst 0x459044b0  // saddwt z16.s, z5.s, z16.h\n"
+    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"  // same requantize sequence as the 4-vector path
+    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
+    ".inst 0x4482905e  // srshl z30.s, p4/M, z30.s, z2.s\n"
+    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
+    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
+    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
+    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
+    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
+    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
+    ".inst 0x4482903e  // srshl z30.s, p4/M, z30.s, z1.s\n"
+    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
+    "add z31.s, z31.s, z0.s\n"
+    "add z17.s, z17.s, z0.s\n"
+    "add z30.s, z30.s, z0.s\n"
+    "add z16.s, z16.s, z0.s\n"
+    "smax z31.s, p4/M, z31.s, z6.s\n"
+    "smax z17.s, p4/M, z17.s, z6.s\n"
+    "smax z30.s, p4/M, z30.s, z6.s\n"
+    "smax z16.s, p4/M, z16.s, z6.s\n"
+    "smin z31.s, p4/M, z31.s, z4.s\n"
+    "smin z17.s, p4/M, z17.s, z4.s\n"
+    "smin z30.s, p4/M, z30.s, z4.s\n"
+    "smin z16.s, p4/M, z16.s, z4.s\n"
+    "trn1 z17.h, z31.h, z17.h\n"
+    "trn1 z16.h, z30.h, z16.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+    "incb x28\n"
+    "whilelt p3.b, x28, %x[n_channels]\n"
+    "b.any 8b\n"
+    "14:"  // End
+
+    :
+    : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+  );
+}
+
+}  // namespace pooling
+}  // namespace arm_conv
+
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
new file mode 100644
index 0000000..ad95207
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "pool_common.hpp"
+#include "utils.hpp"
+
+#include "arm_compute/core/Types.h"
+#include <limits>
+
+namespace arm_conv {
+namespace pooling {
+
+// Depth-first pooling driver. The output plane is traversed in tiles of
+// strategy::out_rows() x strategy::out_cols(); for each tile an array of
+// input pointers and an array of output pointers is constructed (with
+// out-of-bounds entries redirected to shared padding / surplus buffers)
+// before calling into the strategy's kernel.
+template <class strategy>
+class PoolingDepthfirst : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type>
+{
+  using TInput = typename strategy::operand_type;
+  using TOutput = typename strategy::return_type;
+
+  const PoolingArgs m_args;  // Copy of arguments
+
+  // Number of input rows read when computing one tile of output.
+  constexpr static unsigned int input_rows(void)
+  {
+    return (strategy::out_rows() - 1)*strategy::stride_rows() + strategy::pool_rows();
+  }
+
+  // Number of input columns read when computing one tile of output.
+  constexpr static unsigned int input_cols(void)
+  {
+    return (strategy::out_cols() - 1)*strategy::stride_cols() + strategy::pool_cols();
+  }
+
+  // Bytes needed for the shared channel-length vector of padding values.
+  size_t sizeof_input_buffer(void) const
+  {
+    return sizeof(TInput) * m_args.n_channels;
+  }
+
+  // Bytes needed for one thread's channel-length surplus-output vector.
+  size_t sizeof_output_buffer(void) const
+  {
+    return sizeof(TOutput) * m_args.n_channels;
+  }
+
+  public:
+  PoolingDepthfirst(const PoolingArgs &args) : m_args(args)
+  {
+  }
+
+  PoolingDepthfirst(PoolingDepthfirst &) = delete;
+  PoolingDepthfirst &operator=(PoolingDepthfirst &) = delete;
+
+  size_t get_working_size(unsigned int num_threads) const override
+  {
+    // We require a channel-length vector of input padding values
+    // (to be shared amongst all threads) and (for each thread) a
+    // channel-length vector in which to dump surplus output.
+    return sizeof_input_buffer() + num_threads * sizeof_output_buffer();
+  }
+
+  // Execute assuming a dense NHWC layout, deriving the strides from the
+  // problem shape held in m_args.
+  void execute(
+    const void *const input,
+    void *const output,
+    void *const working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    const size_t ld_input_col = m_args.n_channels;
+    const size_t ld_input_row = ld_input_col * m_args.input_cols;
+    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
+    const size_t ld_output_col = ld_input_col;
+    const size_t ld_output_row = ld_output_col * m_args.output_cols;
+    const size_t ld_output_batch = ld_output_row * m_args.output_rows;
+
+    execute(
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space,
+      thread_id, num_threads
+    );
+  }
+
+  // Execute with explicit strides, taking the problem shape from m_args.
+  void execute(
+    const void *const input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    void *const output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    execute(
+      m_args.n_batches, m_args.input_rows, m_args.input_cols,
+      m_args.n_channels,
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      m_args.padding,
+      m_args.output_rows, m_args.output_cols,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space,
+      thread_id, num_threads
+    );
+  }
+
+  // Execute with an explicit problem shape and strides.
+  void execute(
+    unsigned int batches,
+    unsigned int height,
+    unsigned int width,
+    unsigned int channels,
+    const void *const _input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    const PaddingValues &padding,
+    unsigned int output_height,
+    unsigned int output_width,
+    void *const _output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const _working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    // NOTE(review): batches, ld_input_batch and ld_output_batch are in fact
+    // used below, so this UNUSED marker appears redundant (it is harmless).
+    ARM_COMPUTE_UNUSED(batches, ld_input_batch, ld_output_batch);
+    strategy strat(m_args.cpu_info);
+#ifdef CYCLE_PROFILING
+    arm_gemm::profiler prof;
+#endif // CYCLE_PROFILING
+
+    // Cast input and output pointers into the right types
+    const TInput *const inptr = static_cast<const TInput *>(_input);
+    TOutput *const outptr = static_cast<TOutput *>(_output);
+
+    // Split the output rows evenly (rounded up) across the threads; this
+    // thread handles rows [start_out_height, end_out_height).
+    const unsigned int roundup_output_rows = roundup(output_height, num_threads);
+    const unsigned int rows_per_thread = roundup_output_rows / num_threads;
+    const int start_out_height = static_cast<int>(thread_id * rows_per_thread);
+    const int end_out_height = std::min<int>(output_height, static_cast<int>((thread_id + 1) * rows_per_thread));
+
+    // Create an array for the input pointers
+    const TInput * _inptr_array[input_rows() * input_cols()];
+    const TInput **const inptr_array = _inptr_array;
+
+    // Create an array for the output pointers
+    TOutput * _outptr_array[strategy::out_rows() * strategy::out_cols()];
+    TOutput **const outptr_array = _outptr_array;
+
+    // Allocate portions of the working space
+    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
+    TOutput *const output_buffer = reinterpret_cast<TOutput *>(working_space + thread_id * sizeof_output_buffer());
+    TInput *const input_buffer = reinterpret_cast<TInput *>(working_space + num_threads * sizeof_output_buffer());
+
+    // Initialise the input buffer with the identity value for the pooling
+    // operation: zero for average pooling; for max pooling, -infinity where
+    // available, otherwise the smallest representable value.
+    for (unsigned int c = 0; c < channels; c++)
+    {
+      TInput &val = input_buffer[c];
+
+      if (strategy::pooling_type() == PoolingType::AVERAGE)
+      {
+        val = static_cast<TInput>(0);
+      }
+      else if (strategy::pooling_type() == PoolingType::MAX)
+      {
+#if defined(__aarch64__)
+        // Presumably std::numeric_limits is not specialised for __fp16, so
+        // the limits are queried through arm_compute::half — TODO confirm.
+        using InputType = typename std::conditional<std::is_same<TInput, __fp16>::value, arm_compute::half, TInput>::type;
+        using limits = std::numeric_limits<InputType>;
+#else // defined(__aarch64__)
+        using limits = std::numeric_limits<TInput>;
+#endif // defined(__aarch64__)
+        if (limits::has_infinity)
+        {
+          val = -limits::infinity();
+        }
+        else
+        {
+          // has_infinity is false only for integral types here, for which
+          // min() is the smallest representable value.
+          val = limits::min();
+        }
+      }
+    }
+
+    // For each output tile, construct the requisite set of pointers and call
+    // into the kernel.
+    for (unsigned int batch = 0; batch < batches; batch++)
+    {
+      // Get batch pointers
+      const auto inptr_batch = inptr + batch * ld_input_batch;
+      const auto outptr_batch = outptr + batch * ld_output_batch;
+
+      for (int start_out_i = start_out_height;
+           start_out_i < end_out_height;
+           start_out_i += static_cast<int>(strategy::out_rows()))
+      {
+        const int end_out_i = start_out_i + strategy::out_rows();
+        const int start_in_i = start_out_i * strategy::stride_rows() - padding.top;
+        const int end_in_i = start_in_i + input_rows();
+
+        // Compute top/bottom padding - TODO Is this right for average pooling?
+        const auto pad_top = static_cast<unsigned int>(-std::min(start_in_i, 0));
+        const auto pad_bottom = static_cast<unsigned int>(-std::min(static_cast<int>(height) - end_in_i, 0));
+        const unsigned int valid_output_rows = std::min(
+          end_out_i - start_out_i,
+          static_cast<int>(end_out_height) - start_out_i
+        );
+
+        // Fill the input pointer array with padding values
+        for (auto index = 0u; index < input_rows() * input_cols(); index++)
+        {
+          inptr_array[index] = input_buffer;
+        }
+
+        for (int start_out_j = 0, start_in_j = -padding.left;
+             start_out_j < static_cast<int>(output_width);
+             start_out_j += static_cast<int>(strategy::out_cols()),
+             start_in_j += static_cast<int>(strategy::out_cols()) * strategy::stride_cols())
+        {
+          const int end_out_j = start_out_j + strategy::out_cols();
+          const int end_in_j = start_in_j + input_cols();
+
+          // Compute left/right padding - TODO Is this right for average pooling?
+          const auto pad_left = static_cast<unsigned int>(-std::min(start_in_j, 0));
+          const auto pad_right = static_cast<unsigned int>(-std::min(static_cast<int>(width) - end_in_j, 0));
+
+          const unsigned int valid_output_cols = std::min(
+            end_out_j - start_out_j,
+            static_cast<int>(output_width) - start_out_j
+          );
+
+          // Construct the input pointer array - fill the array with pointers to
+          // the input buffer and then fill in the required values.
+          for (auto i = pad_top; i < input_rows() - pad_bottom; i++)
+          {
+            // Can skip over the left padding because we will have either the
+            // same or less than the previous tile.
+            unsigned int j = pad_left;
+            const TInput *colptr = inptr_batch + (start_in_i + i) * ld_input_row + (start_in_j + j) * ld_input_col;
+            const TInput **ptrs = inptr_array + i * input_cols() + j;
+            for (; j < input_cols() - pad_right; j++)
+            {
+              *(ptrs++) = colptr;
+              colptr += ld_input_col;
+            }
+            // Right padding may grow from one tile to the next, so it must be
+            // re-written with pointers to the padding buffer.
+            for (; j < input_cols(); j++)
+            {
+              *(ptrs++) = input_buffer;
+            }
+          }
+
+          // Construct the output pointer array; surplus positions (beyond the
+          // valid output extent) are pointed at the scratch output buffer.
+          TOutput **outptr_pos = outptr_array;
+          for (auto i = 0u; i < valid_output_rows; i++)
+          {
+            unsigned int j = 0u;
+            TOutput *colptr = outptr_batch + (start_out_i + i) * ld_output_row + start_out_j * ld_output_col;
+            for (; j < valid_output_cols; j++)
+            {
+              *(outptr_pos++) = colptr;
+               colptr += ld_output_col;
+            }
+            for (; j < strategy::out_cols(); j++)
+            {
+              *(outptr_pos++) = output_buffer;
+            }
+          }
+          for (auto i = valid_output_rows; i < strategy::out_rows(); i++)
+          {
+            for (auto j = 0u; j < strategy::out_cols(); j++)
+            {
+              *(outptr_pos++) = output_buffer;
+            }
+          }
+
+#ifdef CYCLE_PROFILING
+          // TODO Work number
+          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(strategy::out_rows() * strategy::out_cols() * strategy::pool_rows() * strategy::pool_cols()));
+#endif
+          strat.kernel(
+            channels, inptr_array, outptr_array,
+            m_args.exclude_padding, pad_left, pad_top, pad_right, pad_bottom
+          );
+        }
+      }
+    }
+  }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_cache_oblivious.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_cache_oblivious.hpp
new file mode 100644
index 0000000..4aabd95
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_cache_oblivious.hpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "pool_common.hpp"
+
+#include <stack>
+#include <vector>
+
+namespace arm_conv {
+namespace pooling {
+
+// Cache-oblivious depth-first pooling driver. The output plane is
+// recursively bisected (largest dimension first) until each work item fits
+// within a single strategy::out_rows() x strategy::out_cols() tile; the
+// tile is then executed by gathering input/output pointer arrays and
+// calling into the strategy's kernel.
+template <class strategy>
+class PoolingDepthfirstCacheOblivious : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type>
+{
+  using TInput = typename strategy::operand_type;
+  using TOutput = typename strategy::return_type;
+
+  const PoolingArgs m_args;  // Copy of arguments
+
+  // Number of input rows read when computing one tile of output.
+  constexpr static unsigned int input_rows(void)
+  {
+    return (strategy::out_rows() - 1)*strategy::stride_rows() + strategy::pool_rows();
+  }
+
+  // Number of input columns read when computing one tile of output.
+  constexpr static unsigned int input_cols(void)
+  {
+    return (strategy::out_cols() - 1)*strategy::stride_cols() + strategy::pool_cols();
+  }
+
+  // Bytes needed for the channel-length vector of padding values.
+  size_t sizeof_input_buffer(void) const
+  {
+    return sizeof(TInput) * m_args.n_channels;
+  }
+
+  // Bytes needed for the channel-length surplus-output vector.
+  size_t sizeof_output_buffer(void) const
+  {
+    return sizeof(TOutput) * m_args.n_channels;
+  }
+
+  public:
+  PoolingDepthfirstCacheOblivious(const PoolingArgs &args) : m_args(args)
+  {
+  }
+
+  PoolingDepthfirstCacheOblivious(PoolingDepthfirstCacheOblivious &) = delete;
+  PoolingDepthfirstCacheOblivious &operator=(PoolingDepthfirstCacheOblivious &) = delete;
+
+  size_t get_working_size(void) const override
+  {
+    // We require an array of pointers for the inputs and outputs, a
+    // channel-length vector in which to dump surplus output, and a
+    // channel-length vector of padding values.
+    return sizeof_input_buffer() + sizeof_output_buffer();
+  }
+
+  // Execute assuming a dense NHWC layout, deriving the strides from the
+  // problem shape held in m_args.
+  void execute(
+    const void *const input,
+    void *const output,
+    void *const working_space
+  ) const override
+  {
+    const size_t ld_input_col = m_args.n_channels;
+    const size_t ld_input_row = ld_input_col * m_args.input_cols;
+    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
+    const size_t ld_output_col = ld_input_col;
+    const size_t ld_output_row = ld_output_col * m_args.output_cols;
+    const size_t ld_output_batch = ld_output_row * m_args.output_rows;
+
+    execute(
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space
+    );
+  }
+
+  // Execute with explicit strides, taking the problem shape from m_args.
+  void execute(
+    const void *const input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    void *const output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const working_space
+  ) const override
+  {
+    execute(
+      m_args.n_batches, m_args.input_rows, m_args.input_cols,
+      m_args.n_channels,
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      m_args.padding,
+      m_args.output_rows, m_args.output_cols,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space
+    );
+  }
+
+  // Execute with an explicit problem shape and strides.
+  void execute(
+    unsigned int batches,
+    unsigned int input_height,
+    unsigned int input_width,
+    unsigned int channels,
+    const void *const _input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    const PaddingValues &padding,
+    unsigned int output_height,
+    unsigned int output_width,
+    void *const _output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const _working_space
+  ) const override
+  {
+    strategy strat(m_args.cpu_info);
+#ifdef CYCLE_PROFILING
+    arm_gemm::profiler prof;
+#endif // CYCLE_PROFILING
+
+    // Cast input and output pointers into the right types
+    const TInput *const inptr = static_cast<const TInput *>(_input);
+    TOutput *const outptr = static_cast<TOutput *>(_output);
+
+    // Allocate portions of the working space
+    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
+    TOutput *const output_buffer = reinterpret_cast<TOutput *>(working_space);
+    TInput *const input_buffer = reinterpret_cast<TInput *>(working_space + sizeof_output_buffer());
+
+    // Fill the input buffer with the identity value for the pooling
+    // operation: zero for average pooling; for max pooling, -infinity where
+    // available, otherwise the lowest representable value.
+    const TInput pad_value = (m_args.pool_type == PoolingType::AVERAGE)
+                           ? static_cast<TInput>(0)
+                           : (std::numeric_limits<TInput>::has_infinity
+                              ? -std::numeric_limits<TInput>::infinity()
+                              : std::numeric_limits<TInput>::lowest());
+    for (unsigned int i = 0; i < channels; i++)
+    {
+      input_buffer[i] = pad_value;
+    }
+
+    // Pointers to the start of the batch currently being processed. These
+    // are captured by reference in the lambda below and advanced by the
+    // batch loop at the end of this function. (Fix: the batch dimension was
+    // previously ignored, so only the first batch was ever computed.)
+    const TInput *inptr_batch = inptr;
+    TOutput *outptr_batch = outptr;
+
+    // Keep subdividing the output plane across the longest dimension until we
+    // reach the size of the tile. Queue items for later processing. Note - we
+    // can determine the largest size of the queue a priori from the input
+    // tensor size, this would allow us to allocate memory within the working
+    // space and improve performance.
+    struct WorkItem
+    {
+      unsigned int output_i, output_j;
+      unsigned int output_height, output_width;
+
+      WorkItem(unsigned int i, unsigned int j, unsigned int height, unsigned int width)
+        : output_i(i), output_j(j), output_height(height), output_width(width) {}
+    };
+
+    auto execute = [&] (const WorkItem &item) {
+      // Create an array for the output pointers
+      TOutput * _outptr_array[strategy::out_rows() * strategy::out_cols()];
+      TOutput **const outptr_array = _outptr_array;
+
+      // Construct the output pointer array
+      {
+        // Each row of the pointer array holds out_cols() entries, so the
+        // per-row padding is measured against out_cols(). (Fix: this
+        // previously subtracted from out_rows(), misaligning the array
+        // whenever out_rows() != out_cols().)
+        const auto output_pad_right = strategy::out_cols() - item.output_width;
+        auto outptr_element = outptr_array;
+        auto outptr_row = outptr_batch + item.output_i * ld_output_row + item.output_j * ld_output_col;
+
+        // Fill the array with pointers to the output buffer
+        for (unsigned int i = 0; i < strategy::out_rows() * strategy::out_cols(); i++)
+        {
+          outptr_array[i] = output_buffer;
+        }
+
+        // Fill in the valid portion of the array
+        for (unsigned int i = 0; i < item.output_height; i++)
+        {
+          auto outptr_col = outptr_row;
+          for (unsigned int j = 0; j < item.output_width; j++)
+          {
+            *(outptr_element++) = outptr_col;
+            outptr_col += ld_output_col;
+          }
+          outptr_element += output_pad_right;
+          outptr_row += ld_output_row;
+        }
+      }
+
+      // Compute the input region read for this tile and the implied padding.
+      const int start_i = item.output_i * strategy::stride_rows() - padding.top;
+      const int end_i = start_i + input_rows();
+      const unsigned int pad_top = std::max(0, 0 - start_i);
+      const unsigned int pad_bottom = std::max(0, end_i - static_cast<int>(input_height));
+
+      const int start_j = item.output_j * strategy::stride_cols() - padding.left;
+      const int end_j = start_j + input_cols();
+      const unsigned int pad_left = std::max(0, 0 - start_j);
+      const unsigned int pad_right = std::max(0, end_j - static_cast<int>(input_width));
+
+      // Create an array for the input pointers
+      const TInput * _inptr_array[input_rows() * input_cols()];
+      const TInput **const inptr_array = _inptr_array;
+      {
+        const unsigned int row_padding = pad_top + pad_bottom;
+        const unsigned int valid_rows = input_rows() - row_padding;
+
+        const unsigned int col_padding = pad_left + pad_right;
+        const unsigned int valid_cols = input_cols() - col_padding;
+
+        // Fill the array with pointers to the input buffer
+        for (unsigned int i = 0; i < input_rows() * input_cols(); i++)
+        {
+          inptr_array[i] = input_buffer;
+        }
+
+        // Compute valid initial pointer
+        auto inptr_row = inptr_batch + std::max(start_i, 0) * ld_input_row + std::max(start_j, 0) * ld_input_col;
+
+        // Fill in the valid portion of the input array
+        auto inptr_element = inptr_array + pad_top * input_cols() + pad_left;
+        for (unsigned int i = 0; i < valid_rows; i++)
+        {
+          auto inptr_col = inptr_row;
+          for (unsigned int j = 0; j < valid_cols; j++)
+          {
+            *(inptr_element++) = inptr_col;
+            inptr_col += ld_input_col;
+          }
+
+          inptr_row += ld_input_row;
+          inptr_element += col_padding;  // Skip the padding elements
+        }
+      }
+
+      // Call the kernel
+#ifdef CYCLE_PROFILING
+      // TODO Work number
+      auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(item.output_height * item.output_width * strategy::pool_rows() * strategy::pool_cols()));
+#endif // CYCLE_PROFILING
+      strat.kernel(channels, inptr_array, outptr_array,
+                   pad_left, pad_top, pad_right, pad_bottom);
+    };
+
+    // Process each batch in turn by bisecting its output plane.
+    for (unsigned int batch = 0; batch < batches; batch++)
+    {
+      inptr_batch = inptr + batch * ld_input_batch;
+      outptr_batch = outptr + batch * ld_output_batch;
+
+      // Add the initial work item to the stack of work.
+      std::stack<WorkItem, std::vector<WorkItem>> stack;
+      stack.push(WorkItem(0, 0, output_height, output_width));
+      while (!stack.empty())
+      {
+        // Pop an item from the stack, bisect the largest dimension and either
+        // execute the resulting tiles or add them to the stack if they are too
+        // large.
+        const WorkItem item(stack.top());
+        stack.pop();
+
+        if (item.output_height <= strategy::out_rows() &&
+            item.output_width <= strategy::out_cols())
+        {
+          execute(item);
+        }
+        else
+        {
+          // Split the largest dimension, such that we get an exact number of
+          // tiles in the first partition.
+          if (item.output_height >= item.output_width)
+          {
+            const unsigned int height_in_tiles = (item.output_height + strategy::out_rows() - 1) / strategy::out_rows();
+            const unsigned int tiles_first = height_in_tiles - height_in_tiles / 2;
+
+            const unsigned int height_first = tiles_first * strategy::out_rows();
+            const unsigned int height_second = item.output_height - height_first;
+
+            stack.push(WorkItem(item.output_i + height_first, item.output_j, height_second, item.output_width));
+            stack.push(WorkItem(item.output_i, item.output_j, height_first, item.output_width));
+          }
+          else
+          {
+            // Round up, matching the height split above (fix: this previously
+            // used floor division, unlike the height branch).
+            const unsigned int width_in_tiles = (item.output_width + strategy::out_cols() - 1) / strategy::out_cols();
+            const unsigned int tiles_first = width_in_tiles - width_in_tiles / 2;
+
+            const unsigned int width_first = tiles_first * strategy::out_cols();
+            const unsigned int width_second = item.output_width - width_first;
+
+            stack.push(WorkItem(item.output_i, item.output_j + width_first, item.output_height, width_second));
+            stack.push(WorkItem(item.output_i, item.output_j, item.output_height, width_first));
+          }
+        }
+      }
+    }
+  }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
new file mode 100644
index 0000000..3a15b28
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "pool_common.hpp"
+#include "utils.hpp"
+
+namespace arm_conv {
+namespace pooling {
+
+// Generic depth-first pooling driver for a runtime-sized pooling window
+// (m_args.pool_window): each kernel call produces a single output point
+// from the window's valid (non-padded) input cells.
+template <class strategy>
+class PoolingDepthfirstGeneric : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type>
+{
+  using TInput = typename strategy::operand_type;
+  using TOutput = typename strategy::return_type;
+
+  const PoolingArgs m_args;  // Copy of arguments
+
+  // Input rows covered by the pooling window.
+  unsigned int input_rows(void) const
+  {
+    return m_args.pool_window.rows;
+  }
+
+  // Input columns covered by the pooling window.
+  unsigned int input_cols(void) const
+  {
+    return m_args.pool_window.cols;
+  }
+
+  public:
+  PoolingDepthfirstGeneric(const PoolingArgs &args) : m_args(args)
+  {
+  }
+
+  PoolingDepthfirstGeneric(PoolingDepthfirstGeneric &) = delete;
+  PoolingDepthfirstGeneric &operator=(PoolingDepthfirstGeneric &) = delete;
+
+  // Bytes needed for one thread's array of input pointers (one pointer per
+  // cell of the pooling window).
+  size_t sizeof_input_pointer_array(void) const
+  {
+    return sizeof(TInput *) * input_rows() * input_cols();
+  }
+
+  size_t get_working_size(unsigned int num_threads) const override
+  {
+    return num_threads * sizeof_input_pointer_array();
+  }
+
+  // Execute assuming a dense NHWC layout, deriving the strides from the
+  // problem shape held in m_args.
+  void execute(
+    const void *const input,
+    void *const output,
+    void *const working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    const size_t ld_input_col = m_args.n_channels;
+    const size_t ld_input_row = ld_input_col * m_args.input_cols;
+    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
+    const size_t ld_output_col = ld_input_col;
+    const size_t ld_output_row = ld_output_col * m_args.output_cols;
+    const size_t ld_output_batch = ld_output_row * m_args.output_rows;
+
+    execute(
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space,
+      thread_id, num_threads
+    );
+  }
+
+  // Execute with explicit strides, taking the problem shape from m_args.
+  void execute(
+    const void *const input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    void *const output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    execute(
+      m_args.n_batches, m_args.input_rows, m_args.input_cols,
+      m_args.n_channels,
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      m_args.padding,
+      m_args.output_rows, m_args.output_cols,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space,
+      thread_id, num_threads
+    );
+  }
+
+  // Execute with an explicit problem shape and strides.
+  void execute(
+    unsigned int batches,
+    unsigned int height,
+    unsigned int width,
+    unsigned int channels,
+    const void *const _input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    const PaddingValues &padding,
+    unsigned int output_height,
+    unsigned int output_width,
+    void *const _output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const _working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    strategy strat(m_args.cpu_info);
+#ifdef CYCLE_PROFILING
+    arm_gemm::profiler prof;
+#endif // CYCLE_PROFILING
+
+    // Split the output rows evenly (rounded up) across the threads.
+    const unsigned int roundup_output_rows = roundup(output_height, num_threads);
+    const unsigned int rows_per_thread = roundup_output_rows / num_threads;
+    int start_out_height = static_cast<int>(thread_id * rows_per_thread);
+    int end_out_height = std::min<int>(output_height, static_cast<int>((thread_id + 1) * rows_per_thread));
+
+    // With a single output row there is nothing to split by rows, so
+    // parallelise across the channels instead.
+    unsigned int start_channel = 0;
+    unsigned int end_channel = channels;
+    if(output_height == 1)
+    {
+      const unsigned int channels_per_thread = roundup(channels, num_threads) / num_threads;
+      start_channel = thread_id * channels_per_thread;
+      end_channel = std::min(start_channel + channels_per_thread, channels);
+
+      // Reset start and end rows
+      start_out_height = 0;
+      end_out_height = output_height;
+    }
+
+    // Cast input and output pointers into the right types
+    const TInput *const inptr = static_cast<const TInput *>(_input) + start_channel;
+    TOutput *const outptr = static_cast<TOutput *>(_output) + start_channel;
+
+    // Grab the input pointer array
+    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
+    const TInput **const inptr_array = reinterpret_cast<const TInput **>(working_space + thread_id * sizeof_input_pointer_array());
+
+    // For each output tile, construct the requisite set of pointers and call
+    // into the kernel.
+    for (unsigned int batch = 0; batch < batches; batch++)
+    {
+      // Get batch pointers
+      const auto inptr_batch = inptr + batch * ld_input_batch;
+      auto outptr_row = outptr + batch * ld_output_batch + start_out_height * ld_output_row;
+
+      for (int out_i = start_out_height; out_i < end_out_height; out_i++)
+      {
+        const int start_in_i = out_i * m_args.pool_stride.rows - padding.top;
+        const int end_in_i = start_in_i + m_args.pool_window.rows;
+
+        // Compute top/bottom padding
+        const auto pad_top = static_cast<unsigned int>(std::max(0 - start_in_i, 0));
+        const auto pad_bottom = static_cast<unsigned int>(std::max<int>(end_in_i - height, 0));
+        const auto valid_rows = input_rows() - pad_top - pad_bottom;
+
+        auto outptr_col = outptr_row;
+        auto inptr_row = inptr_batch + (start_in_i + pad_top) * ld_input_row;
+
+        for (int out_j = 0, start_in_j = -padding.left;
+             out_j < static_cast<int>(output_width);
+             out_j++, start_in_j += m_args.pool_stride.cols)
+        {
+          const int end_in_j = start_in_j + m_args.pool_window.cols;
+
+          // Compute left/right padding
+          const auto pad_left = static_cast<unsigned int>(std::max(0 - start_in_j, 0));
+          const auto pad_right = static_cast<unsigned int>(std::max<int>(0, end_in_j - width));
+          const auto valid_cols = input_cols() - pad_left - pad_right;
+
+          // Construct the input pointer array - fill in all valid points
+          // contiguously.
+          const TInput **ptrs = inptr_array;
+          const TInput *rowptr = inptr_row + (start_in_j + pad_left) * ld_input_col;
+          for (auto i = 0u; i < valid_rows; i++)
+          {
+            const TInput *colptr = rowptr;
+            for (auto j = 0u; j < valid_cols; j++)
+            {
+              *(ptrs++) = colptr;
+              colptr += ld_input_col;
+            }
+            rowptr += ld_input_row;
+          }
+
+          // Compute the number of valid cells; window_cells additionally
+          // counts the padded cells unless padding is excluded.
+          const auto valid_cells = valid_rows * valid_cols;
+          const auto window_cells = m_args.exclude_padding ? valid_cells : input_rows() * input_cols();
+
+          // Get the output pointer for this call
+          TOutput *outptr = outptr_col;
+          outptr_col += ld_output_col;
+
+#ifdef CYCLE_PROFILING
+          // TODO Work number
+          // NOTE(review): uses strategy::out_rows()/out_cols()/pool_rows()/
+          // pool_cols(); confirm generic strategies provide these when
+          // CYCLE_PROFILING is enabled.
+          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(strategy::out_rows() * strategy::out_cols() * strategy::pool_rows() * strategy::pool_cols()));
+#endif // CYCLE_PROFILING
+          strat.kernel(window_cells, valid_cells, end_channel - start_channel, inptr_array, outptr);
+        }
+
+        outptr_row += ld_output_row;
+      }
+    }
+  }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic_quantized.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic_quantized.hpp
new file mode 100644
index 0000000..9516042
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic_quantized.hpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "pool_common.hpp"
+#include "utils.hpp"
+
+namespace arm_conv {
+namespace pooling {
+
+// Depthfirst implementation of generic (arbitrary window/stride) pooling for
+// quantized types. For every output point it gathers pointers to the valid
+// (non-padded) input cells into a per-thread pointer array and calls the
+// strategy kernel, forwarding the Requantize32 parameters.
+template <class strategy>
+class PoolingDepthfirstGenericQuantized : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type, Requantize32>
+{
+  using TInput = typename strategy::operand_type;
+  using TOutput = typename strategy::return_type;
+
+  const PoolingArgs m_args;  // Copy of arguments
+  const Requantize32 m_requant;  // Quantization parameters
+
+  // NOTE: these return the dimensions of the POOLING WINDOW, not of the input
+  // tensor; they size the pointer array handed to the kernel.
+  unsigned int input_rows(void) const
+  {
+    return m_args.pool_window.rows;
+  }
+
+  unsigned int input_cols(void) const
+  {
+    return m_args.pool_window.cols;
+  }
+
+  public:
+  PoolingDepthfirstGenericQuantized(const PoolingArgs &args, const Requantize32 &rq) : m_args(args), m_requant(rq)
+  {
+  }
+
+  // Non-copyable: holds configuration by value and is shared across threads.
+  PoolingDepthfirstGenericQuantized(PoolingDepthfirstGenericQuantized &) = delete;
+  PoolingDepthfirstGenericQuantized &operator=(PoolingDepthfirstGenericQuantized &) = delete;
+
+  // Bytes needed for one thread's array of input cell pointers (one pointer
+  // per pooling window cell).
+  size_t sizeof_input_pointer_array(void) const
+  {
+    return sizeof(TInput *) * input_rows() * input_cols();
+  }
+
+  // Working space holds one input pointer array per thread.
+  size_t get_working_size(unsigned int num_threads) const override
+  {
+    return num_threads * sizeof_input_pointer_array();
+  }
+
+  // Convenience overload: derives dense NHWC strides from the stored
+  // arguments and defers to the fully-strided overload below.
+  void execute(
+    const void *const input,
+    void *const output,
+    void *const working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    const size_t ld_input_col = m_args.n_channels;
+    const size_t ld_input_row = ld_input_col * m_args.input_cols;
+    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
+    const size_t ld_output_col = ld_input_col;
+    const size_t ld_output_row = ld_output_col * m_args.output_cols;
+    const size_t ld_output_batch = ld_output_row * m_args.output_rows;
+
+    execute(
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space,
+      thread_id, num_threads
+    );
+  }
+
+  // Strided overload: fills in the tensor shapes from the stored arguments
+  // and defers to the fully-parameterised overload below.
+  void execute(
+    const void *const input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    void *const output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    execute(
+      m_args.n_batches, m_args.input_rows, m_args.input_cols,
+      m_args.n_channels,
+      input, ld_input_col, ld_input_row, ld_input_batch,
+      m_args.padding,
+      m_args.output_rows, m_args.output_cols,
+      output, ld_output_col, ld_output_row, ld_output_batch,
+      working_space,
+      thread_id, num_threads
+    );
+  }
+
+  // Main entry point: performs the pooling over [batches x height x width x
+  // channels] NHWC input, splitting work across `num_threads` callers.
+  void execute(
+    unsigned int batches,
+    unsigned int height,
+    unsigned int width,
+    unsigned int channels,
+    const void *const _input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    const PaddingValues &padding,
+    unsigned int output_height,
+    unsigned int output_width,
+    void *const _output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *const _working_space,
+    unsigned int thread_id,
+    unsigned int num_threads
+  ) const override
+  {
+    strategy strat(m_args.cpu_info);
+#ifdef CYCLE_PROFILING
+    arm_gemm::profiler prof;
+#endif // CYCLE_PROFILING
+
+    // Default work split: partition output rows evenly across threads.
+    const unsigned int roundup_output_rows = roundup(output_height, num_threads);
+    const unsigned int rows_per_thread = roundup_output_rows / num_threads;
+    int start_out_height = static_cast<int>(thread_id * rows_per_thread);
+    int end_out_height = std::min<int>(output_height, static_cast<int>((thread_id + 1) * rows_per_thread));
+
+    unsigned int start_channel = 0;
+    unsigned int end_channel = channels;
+    // With a single output row there is nothing to split by rows, so
+    // parallelise over channels instead.
+    if(output_height == 1)
+    {
+      const unsigned int channels_per_thread = roundup(channels, num_threads) / num_threads;
+      start_channel = thread_id * channels_per_thread;
+      end_channel = std::min(start_channel + channels_per_thread, channels);
+
+      // Reset start and end rows
+      start_out_height = 0;
+      end_out_height = output_height;
+    }
+
+    // Cast input and output pointers into the right types
+    const TInput *const inptr = static_cast<const TInput *>(_input) + start_channel;
+    TOutput *const outptr = static_cast<TOutput *>(_output) + start_channel;
+
+    // Grab the input pointer array (this thread's slice of the working space)
+    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
+    const TInput **const inptr_array = reinterpret_cast<const TInput **>(working_space + thread_id * sizeof_input_pointer_array());
+
+    // For each output tile, construct the requisite set of pointers and call
+    // into the kernel.
+    for (unsigned int batch = 0; batch < batches; batch++)
+    {
+      // Get batch pointers
+      const auto inptr_batch = inptr + batch * ld_input_batch;
+      const auto outptr_batch = outptr + batch * ld_output_batch;
+
+      for (int out_i = start_out_height; out_i < end_out_height; out_i++)
+      {
+        // Input row span covered by this output row (may extend into padding).
+        const int start_in_i = out_i * m_args.pool_stride.rows - padding.top;
+        const int end_in_i = start_in_i + m_args.pool_window.rows;
+
+        // Compute top/bottom padding
+        const auto pad_top = static_cast<unsigned int>(-std::min(start_in_i, 0));
+        const auto pad_bottom = static_cast<unsigned int>(-std::min(static_cast<int>(height) - end_in_i, 0));
+
+        for (int out_j = 0, start_in_j = -padding.left;
+             out_j < static_cast<int>(output_width);
+             out_j++, start_in_j += m_args.pool_stride.cols)
+        {
+          const int end_in_j = start_in_j + m_args.pool_window.cols;
+
+          // Compute left/right padding
+          const auto pad_left = static_cast<unsigned int>(-std::min(start_in_j, 0));
+          const auto pad_right = static_cast<unsigned int>(-std::min(static_cast<int>(width) - end_in_j, 0));
+
+          // Construct the input pointer array - fill in all valid points
+          // contiguously.
+          const TInput **ptrs = inptr_array;
+          for (auto i = pad_top; i < input_rows() - pad_bottom; i++)
+          {
+            // Can skip over the left padding because we will have either the
+            // same or less than the previous tile.
+            unsigned int j = pad_left;
+            const TInput *colptr = inptr_batch + (start_in_i + i) * ld_input_row + (start_in_j + j) * ld_input_col;
+            for (; j < input_cols() - pad_right; j++)
+            {
+              *(ptrs++) = colptr;
+              colptr += ld_input_col;
+            }
+          }
+
+          // Compute the number of valid cells
+          const auto valid_rows = input_rows() - pad_top - pad_bottom;
+          const auto valid_cols = input_cols() - pad_left - pad_right;
+          const auto valid_cells = valid_rows * valid_cols;
+          // window_cells is the divisor the kernel averages over: either only
+          // the valid cells, or the whole window when padding is included.
+          const auto window_cells = m_args.exclude_padding ? valid_cells : input_rows() * input_cols();
+
+          // Get the output pointer for this call
+          TOutput *outptr = outptr_batch + out_i * ld_output_row + out_j * ld_output_col;
+
+#ifdef CYCLE_PROFILING
+          // TODO Work number
+          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long) 0);
+#endif
+          strat.kernel(window_cells, valid_cells, end_channel - start_channel, inptr_array, outptr, m_requant);
+        }
+      }
+    }
+  }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
new file mode 100644
index 0000000..094c6aa
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+// This can only be built if the target/compiler supports FP16 arguments.
+#ifdef __ARM_FP16_ARGS
+
+#include "arm_gemm_local.hpp"
+
+#include "pooling_implementation.hpp"
+#include "pooling_depthfirst.hpp"
+#include "pooling_depthfirst_generic.hpp"
+
+#include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+#include "kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
+#include "kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__ARM_FEATURE_SVE)
+#include "kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
+#include "kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace
+{
+  // True iff the strategy's compile-time pooling configuration (type, window
+  // and stride) exactly matches the runtime arguments.
+  template <class Strategy>
+  bool is_supported(const PoolingArgs &args, const Nothing &)
+  {
+    return ((args.pool_type == Strategy::pooling_type()) &&
+            (args.pool_window.rows == Strategy::pool_rows()) &&
+            (args.pool_window.cols == Strategy::pool_cols()) &&
+            (args.pool_stride.rows == Strategy::stride_rows()) &&
+            (args.pool_stride.cols == Strategy::stride_cols()));
+  }
+}
+
+// Candidate FP16 implementations in priority order: specialised fixed-window
+// kernels first, generic fallbacks last. The first entry whose predicate
+// accepts the arguments is selected.
+static const PoolingImplementation<__fp16, __fp16> pooling_fp16_methods[] = {
+  {
+    PoolingMethod::DEPTHFIRST,
+    "cpp_fp16_nhwc_1x1_stride_any_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.pool_window.rows == 1 && args.pool_window.cols == 1;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<__fp16>>(args);
+    },
+  },
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirst<sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst",
+    is_supported<sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirst<sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp16_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirstGeneric<sve_fp16_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp16_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirstGeneric<sve_fp16_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_SVE)
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirst<a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst",
+    is_supported<a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirst<a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp16_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirstGeneric<a64_fp16_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp16_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
+      return new PoolingDepthfirstGeneric<a64_fp16_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif  // defined(__aarch64__)
+  { PoolingMethod::DEFAULT, "", nullptr, nullptr, nullptr },  // End of list
+};
+
+// Expose the FP16 table to the generic implementation-selection machinery.
+template <>
+const PoolingImplementation<__fp16, __fp16> *pooling_implementation_list()
+{
+  return pooling_fp16_methods;
+}
+
+// Explicit instantiation of the FP16 pooling factory.
+template UniquePoolingCommon<__fp16, __fp16> pooling(const PoolingArgs &, const Nothing &);
+
+}  //  namespace pooling
+}  //  namespace arm_conv
+
+#endif // __ARM_FP16_ARGS
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
new file mode 100644
index 0000000..002115d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm_local.hpp"
+
+#include "pooling_implementation.hpp"
+#include "pooling_depthfirst.hpp"
+#include "pooling_depthfirst_generic.hpp"
+
+#include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+#include "kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
+#include "kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__ARM_FEATURE_SVE)
+#include "kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp"
+#include "kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__aarch64__)
+
+namespace arm_conv {
+namespace pooling {
+
+namespace
+{
+  // True iff the strategy's compile-time pooling configuration (type, window
+  // and stride) exactly matches the runtime arguments.
+  template <class Strategy>
+  bool is_supported(const PoolingArgs &args, const Nothing &)
+  {
+    return ((args.pool_type == Strategy::pooling_type()) &&
+            (args.pool_window.rows == Strategy::pool_rows()) &&
+            (args.pool_window.cols == Strategy::pool_cols()) &&
+            (args.pool_stride.rows == Strategy::stride_rows()) &&
+            (args.pool_stride.cols == Strategy::stride_cols()));
+  }
+}
+
+// Candidate FP32 implementations in priority order: specialised fixed-window
+// kernels first, generic fallbacks last. The first entry whose predicate
+// accepts the arguments is selected.
+static const PoolingImplementation<float, float> pooling_fp32_methods[] = {
+  {
+    PoolingMethod::DEPTHFIRST,
+    "cpp_fp32_nhwc_1x1_stride_any_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.pool_window.rows == 1 && args.pool_window.cols == 1;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<float>>(args);
+    },
+  },
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirst<sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst",
+    is_supported<sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirst<sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp32_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirstGeneric<sve_fp32_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_fp32_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirstGeneric<sve_fp32_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_SVE)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirst<a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst",
+    is_supported<a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirst<a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp32_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirstGeneric<a64_fp32_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_fp32_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
+      return new PoolingDepthfirstGeneric<a64_fp32_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__aarch64__)
+  { PoolingMethod::DEFAULT, "", nullptr, nullptr, nullptr },  // End of list
+};
+
+// Expose the FP32 table to the generic implementation-selection machinery.
+template <>
+const PoolingImplementation<float, float> *pooling_implementation_list()
+{
+  return pooling_fp32_methods;
+}
+
+// Explicit instantiation of the FP32 pooling factory.
+template UniquePoolingCommon<float, float> pooling(const PoolingArgs &, const Nothing &);
+
+}  //  namespace pooling
+}  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp
new file mode 100644
index 0000000..3d968b8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "pooling.hpp"
+
+#include <cstddef>
+#include <functional>
+#include <cstring>
+
+namespace arm_conv {
+namespace pooling {
+
+// One entry in a per-type table of candidate pooling implementations.
+// `is_supported` and `cycle_estimate` may be nullptr, meaning "always
+// supported" and "no estimate" respectively (see the getters below).
+template <typename TInput, typename TOutput, class OutputStage = Nothing>
+struct PoolingImplementation
+{
+  const PoolingMethod method;  // Broad algorithm category; DEFAULT marks end-of-list
+  const char * name;           // Human-readable kernel name (used for config filtering)
+  std::function<bool(const PoolingArgs &, const OutputStage &)> is_supported;
+  std::function<uint64_t(const PoolingArgs &, const OutputStage &)> cycle_estimate;
+  std::function<PoolingCommon<TInput, TOutput, OutputStage> *(const PoolingArgs &, const OutputStage &)> initialise;
+
+  // A null predicate means the implementation is unconditionally supported.
+  bool get_is_supported(const PoolingArgs &args, const OutputStage &os) const
+  {
+    return (is_supported == nullptr) ? true : is_supported(args, os);
+  }
+
+  // A null estimator yields 0 (i.e. no cost information).
+  uint64_t get_cycle_estimate(const PoolingArgs &args, const OutputStage &os) const
+  {
+    return (cycle_estimate == nullptr) ? 0 : cycle_estimate(args, os);
+  }
+
+  // Construct a new instance of the implementation; caller owns the result.
+  PoolingCommon<TInput, TOutput, OutputStage> *get_instance(const PoolingArgs &args, const OutputStage &os) const
+  {
+    return initialise(args, os);
+  }
+};
+
+// Per-type specialisations (defined in pooling_fp32.cpp etc.) return the
+// corresponding sentinel-terminated implementation table.
+template <typename TInput, typename TOutput, class OutputStage = Nothing>
+const PoolingImplementation<TInput, TOutput, OutputStage> *pooling_implementation_list();
+
+// Selects an implementation for the given arguments; returns true and sets
+// `selected` on success, false if no candidate is applicable.
+template <typename TInput, typename TOutput, class OutputStage = Nothing>
+bool find_implementation(
+  const PoolingArgs &args,
+  const OutputStage &os,
+  const PoolingImplementation<TInput, TOutput, OutputStage> * &selected
+)
+{
+  // For now, return the first valid implementation
+  const auto *impl = pooling_implementation_list<TInput, TOutput, OutputStage>();
+  for (; impl->method != PoolingMethod::DEFAULT; impl++)
+  {
+    if (args.config != nullptr)
+    {
+      // Apply filters provided by the configuration
+      const auto cfg = args.config;
+
+      // Skip entries whose name does not contain the filter substring.
+      if (cfg->filter != "" && !std::strstr(impl->name, cfg->filter.c_str()))
+      {
+        continue;
+      }
+    }
+
+    if (impl->get_is_supported(args, os))
+    {
+      selected = impl;
+      return true;
+    }
+  }
+  return false;
+}
+
+// Factory: returns a smart pointer owning the selected implementation, or
+// holding nullptr when no implementation supports the arguments.
+template <typename TInput, typename TOutput, class OutputStage>
+UniquePoolingCommon<TInput, TOutput, OutputStage> pooling(const PoolingArgs &args, const OutputStage &os)
+{
+  const PoolingImplementation<TInput, TOutput, OutputStage> *impl = nullptr;
+  const bool success = find_implementation<TInput, TOutput, OutputStage>(args, os, impl);
+  return UniquePoolingCommon<TInput, TOutput, OutputStage>(success ? impl->get_instance(args, os) : nullptr);
+}
+
+}  //  namespace pooling
+}  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
new file mode 100644
index 0000000..490fc0d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm_local.hpp"
+
+#include "pooling_implementation.hpp"
+#include "pooling_depthfirst.hpp"
+#include "pooling_depthfirst_generic.hpp"
+
+#include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+#if defined(SVE2)
+#include "kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp"
+#endif  // defined(SVE2)
+#include "kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sve_s8_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__ARM_FEATURE_SVE)
+#include "kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/a64_s8_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__aarch64__)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+namespace
+{
+  // True iff the strategy's compile-time pooling configuration (type, window
+  // and stride) exactly matches the runtime arguments.
+  template <class Strategy>
+  bool is_supported(const PoolingArgs &args, const Nothing &)
+  {
+    return ((args.pool_type == Strategy::pooling_type()) &&
+            (args.pool_window.rows == Strategy::pool_rows()) &&
+            (args.pool_window.cols == Strategy::pool_cols()) &&
+            (args.pool_stride.rows == Strategy::stride_rows()) &&
+            (args.pool_stride.cols == Strategy::stride_cols()));
+  }
+}
+
+// Candidate int8 implementations in priority order; the first entry whose
+// predicate accepts the arguments is selected. Note the SVE average-pooling
+// kernel additionally requires SVE2 (guard below).
+static const PoolingImplementation<int8_t, int8_t> pooling_s8_methods[] = {
+  {
+    PoolingMethod::DEPTHFIRST,
+    "cpp_s8_nhwc_1x1_stride_any_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.pool_window.rows == 1 && args.pool_window.cols == 1;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<int8_t>>(args);
+    },
+  },
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+#if defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_s8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirstGeneric<sve_s8_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirst<sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_s8_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirstGeneric<sve_s8_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_SVE)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirst<a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_s8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirstGeneric<a64_s8_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_s8_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      return new PoolingDepthfirstGeneric<a64_s8_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__aarch64__)
+  { PoolingMethod::DEFAULT, "", nullptr, nullptr, nullptr },  // End of list
+};
+
+// Expose the int8 table to the generic implementation-selection machinery.
+template <>
+const PoolingImplementation<int8_t, int8_t> *pooling_implementation_list()
+{
+  return pooling_s8_methods;
+}
+
+// Explicit instantiation of the int8 pooling factory.
+template UniquePoolingCommon<int8_t, int8_t> pooling(const PoolingArgs &, const Nothing &);
+
+}  //  namespace pooling
+}  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
new file mode 100644
index 0000000..fd4e045
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm_local.hpp"
+
+#include "pooling_implementation.hpp"
+#include "pooling_depthfirst_generic_quantized.hpp"
+
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+#include "kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
+#include "kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__aarch64__)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+static const PoolingImplementation<int8_t, int8_t, Requantize32> pooling_s8q_methods[] = {
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_s8q_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<sve_s8q_nhwc_avg_generic_depthfirst>(args, rq);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_s8q_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<sve_s8q_nhwc_max_generic_depthfirst>(args, rq);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_s8q_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<a64_s8q_nhwc_avg_generic_depthfirst>(args, rq);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_s8q_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<a64_s8q_nhwc_max_generic_depthfirst>(args, rq);
+    },
+  },
+#endif  // defined(__aarch64__)
+  { PoolingMethod::DEFAULT, "", nullptr, nullptr, nullptr },  // End of list
+};
+
+template <>
+const PoolingImplementation<int8_t, int8_t, Requantize32> *pooling_implementation_list()
+{
+  return pooling_s8q_methods;
+}
+
+template UniquePoolingCommon<int8_t, int8_t, Requantize32> pooling(const PoolingArgs &, const Requantize32 &);
+
+}  //  namespace pooling
+}  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
new file mode 100644
index 0000000..0523549
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm_local.hpp"
+
+#include "pooling_implementation.hpp"
+#include "pooling_depthfirst.hpp"
+#include "pooling_depthfirst_generic.hpp"
+
+#include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+#if defined(SVE2)
+#include "kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp"
+#endif  // defined(SVE2)
+#include "kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/sve_u8_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__ARM_FEATURE_SVE)
+#include "kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
+#include "kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/a64_u8_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__aarch64__)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+namespace
+{
+  template <class Strategy>
+  bool is_supported(const PoolingArgs &args, const Nothing &)
+  {
+    return ((args.pool_type == Strategy::pooling_type()) &&
+            (args.pool_window.rows == Strategy::pool_rows()) &&
+            (args.pool_window.cols == Strategy::pool_cols()) &&
+            (args.pool_stride.rows == Strategy::stride_rows()) &&
+            (args.pool_stride.cols == Strategy::stride_cols()));
+  }
+}
+
+static const PoolingImplementation<uint8_t, uint8_t> pooling_u8_methods[] = {
+  {
+    PoolingMethod::DEPTHFIRST,
+    "cpp_u8_nhwc_1x1_stride_any_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.pool_window.rows == 1 && args.pool_window.cols == 1;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<uint8_t>>(args);
+    },
+  },
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
+#if defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_u8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      // This kernel can only be used when there is either no padding, or we don't care
+      // about the value of the padding. Otherwise, we would need to pass in the zero-point
+      // for the quantization regime.
+      return (args.exclude_padding ||
+              (args.padding.top == 0 && args.padding.bottom == 0 &&
+               args.padding.left == 0 && args.padding.right == 0)
+              ) && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirstGeneric<sve_u8_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirst<sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_u8_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirstGeneric<sve_u8_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_SVE)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    is_supported<a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst>,
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirst<a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_u8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      // This kernel can only be used when there is either no padding, or we don't care
+      // about the value of the padding. Otherwise, we would need to pass in the zero-point
+      // for the quantization regime.
+      return (args.exclude_padding ||
+              (args.padding.top == 0 && args.padding.bottom == 0 &&
+               args.padding.left == 0 && args.padding.right == 0)
+              ) && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirstGeneric<a64_u8_nhwc_avg_generic_depthfirst>(args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_u8_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      return new PoolingDepthfirstGeneric<a64_u8_nhwc_max_generic_depthfirst>(args);
+    },
+  },
+#endif  // defined(__aarch64__)
+  { PoolingMethod::DEFAULT, "", nullptr, nullptr, nullptr },  // End of list
+};
+
+template <>
+const PoolingImplementation<uint8_t, uint8_t> *pooling_implementation_list()
+{
+  return pooling_u8_methods;
+}
+
+template UniquePoolingCommon<uint8_t, uint8_t> pooling(const PoolingArgs &, const Nothing &);
+
+}  //  namespace pooling
+}  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
new file mode 100644
index 0000000..41303fb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm_local.hpp"
+
+#include "pooling_implementation.hpp"
+#include "pooling_depthfirst_generic_quantized.hpp"
+
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+#include "kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
+#include "kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp"
+#include "kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp"
+#endif  // defined(__aarch64__)
+
+#include <cstdint>
+
+namespace arm_conv {
+namespace pooling {
+
+static const PoolingImplementation<uint8_t, uint8_t, Requantize32> pooling_u8q_methods[] = {
+#if defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE) && defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_u8q_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<sve_u8q_nhwc_avg_generic_depthfirst>(args, rq);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_u8q_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<sve_u8q_nhwc_max_generic_depthfirst>(args, rq);
+    },
+  },
+#endif  // defined(__ARM_FEATURE_SVE) && defined(SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_u8q_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<a64_u8q_nhwc_avg_generic_depthfirst>(args, rq);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "a64_u8q_nhwc_max_generic_depthfirst",
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.pool_type == PoolingType::MAX; },
+    nullptr,
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
+      return new PoolingDepthfirstGenericQuantized<a64_u8q_nhwc_max_generic_depthfirst>(args, rq);
+    },
+  },
+#endif  // defined(__aarch64__)
+  { PoolingMethod::DEFAULT, "", nullptr, nullptr, nullptr },  // End of list
+};
+
+template <>
+const PoolingImplementation<uint8_t, uint8_t, Requantize32> *pooling_implementation_list()
+{
+  return pooling_u8q_methods;
+}
+
+template UniquePoolingCommon<uint8_t, uint8_t, Requantize32> pooling(const PoolingArgs &, const Requantize32 &);
+
+}  //  namespace pooling
+}  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.cpp
new file mode 100644
index 0000000..3c84f36
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+using namespace arm_compute::misc::shape_calculator;
+
+void NEPoolingAssemblyWrapperKernel::configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info, const CPUInfo &cpu_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    // Output initialization if not yet initialized
+    auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_pool_shape(*input, info)));
+
+    const bool requantize = input->quantization_info() != output->quantization_info();
+
+    switch(input->data_type())
+    {
+        case DataType::QASYMM8:
+            if(requantize)
+            {
+                create_arm_pooling_requant<uint8_t, uint8_t>(input, output, info, cpu_info);
+            }
+            else
+            {
+                create_arm_pooling<uint8_t, uint8_t>(input, output, info, cpu_info);
+            }
+            break;
+        case DataType::QASYMM8_SIGNED:
+            if(requantize)
+            {
+                create_arm_pooling_requant<int8_t, int8_t>(input, output, info, cpu_info);
+            }
+            else
+            {
+                create_arm_pooling<int8_t, int8_t>(input, output, info, cpu_info);
+            }
+            break;
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+        case DataType::F16:
+            create_arm_pooling<float16_t, float16_t>(input, output, info, cpu_info);
+            break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+        case DataType::F32:
+            create_arm_pooling<float, float>(input, output, info, cpu_info);
+            break;
+        default:
+            break;
+    }
+
+    Window win = calculate_max_window(*output, Steps());
+    INEKernel::configure(win);
+}
+
+Status NEPoolingAssemblyWrapperKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->data_layout() != DataLayout::NHWC) || (info.data_layout != DataLayout::NHWC), "Only NHWC is supported by assembly kernels");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((info.pool_type != PoolingType::AVG) && (info.pool_type != PoolingType::MAX),
+                                    "Only AVG and MAX pooling are supported by assembly kernels");
+
+    if(output->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+
+        const auto input_qinfo  = input->quantization_info().uniform();
+        const auto output_qinfo = output->quantization_info().uniform();
+
+        if(input_qinfo != output_qinfo)
+        {
+            const float multiplier = input_qinfo.scale / output_qinfo.scale;
+            int32_t     output_multiplier{};
+            int32_t     output_shift{};
+            ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
+        }
+        else
+        {
+            if(input->data_type() == DataType::QASYMM8)
+            {
+                const bool has_padding = info.pad_stride_info.has_padding();
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(!info.exclude_padding && has_padding, "Assembly kernels do not support padding for QASYMM8 with same input/output quantization info");
+            }
+        }
+    }
+    else
+    {
+        if(input->data_type() == DataType::QASYMM8)
+        {
+            // If output is not configured, the quantization info are the same
+            const bool has_padding = info.pad_stride_info.has_padding();
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(!info.exclude_padding && has_padding, "Assembly kernels do not support padding for QASYMM8 with same input/output quantization info");
+        }
+    }
+    return Status{};
+}
+
+void NEPoolingAssemblyWrapperKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_kernel_asm.get());
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_UNUSED(window);
+    ARM_COMPUTE_UNUSED(info);
+
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
+
+    const ITensor *input     = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *output    = tensors.get_tensor(TensorType::ACL_DST_0);
+    ITensor       *workspace = tensors.get_tensor(TensorType::ACL_DST_1);
+
+    const auto in_ptr        = input->buffer() + input->info()->offset_first_element_in_bytes();
+    auto       out_ptr       = output->buffer() + output->info()->offset_first_element_in_bytes();
+    auto       working_space = workspace->buffer() + workspace->info()->offset_first_element_in_bytes();
+
+    _kernel_asm->execute(in_ptr, out_ptr, working_space, info.thread_id, info.num_threads);
+}
+
+size_t NEPoolingAssemblyWrapperKernel::get_working_size(unsigned int num_threads) const
+{
+    return _kernel_asm->get_working_size(num_threads);
+}
+
+bool NEPoolingAssemblyWrapperKernel::is_configured() const
+{
+    return _kernel_asm != nullptr;
+}
+
+template <typename TypeInput, typename TypeOutput>
+void NEPoolingAssemblyWrapperKernel::create_arm_pooling(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info, const CPUInfo &cpu_info)
+{
+    const arm_conv::pooling::PoolingType pool_type = (info.pool_type == PoolingType::AVG) ? arm_conv::pooling::PoolingType::AVERAGE : arm_conv::pooling::PoolingType::MAX;
+
+    arm_conv::pooling::PoolingWindow window{};
+    window.cols = static_cast<unsigned int>(info.pool_size.x());
+    window.rows = static_cast<unsigned int>(info.pool_size.y());
+
+    arm_conv::pooling::PoolingStride stride{};
+    std::tie(stride.cols, stride.rows) = info.pad_stride_info.stride();
+
+    const arm_conv::pooling::PaddingValues padding{ info.pad_stride_info.pad_left(), info.pad_stride_info.pad_top(), info.pad_stride_info.pad_right(), info.pad_stride_info.pad_bottom() };
+
+    constexpr unsigned int idx_width    = 1;
+    constexpr unsigned int idx_height   = 2;
+    constexpr unsigned int idx_channels = 0;
+    constexpr unsigned int idx_batches  = 3;
+
+    const unsigned int n_batches   = input->dimension(idx_batches);
+    const unsigned int input_rows  = input->dimension(idx_height);
+    const unsigned int input_cols  = input->dimension(idx_width);
+    const unsigned int n_channels  = input->dimension(idx_channels);
+    const unsigned int output_rows = output->dimension(idx_height);
+    const unsigned int output_cols = output->dimension(idx_width);
+
+    arm_conv::pooling::PoolingArgs args(&cpu_info, pool_type, window, stride, info.exclude_padding, n_batches, input_rows, input_cols, n_channels, output_rows, output_cols, padding, nullptr);
+
+    // Configure assembly pooling kernel
+    auto pooling_kernel_asm = arm_conv::pooling::pooling<TypeInput, TypeOutput>(args);
+    if(pooling_kernel_asm == nullptr)
+    {
+        // Configuration not supported: Leave function unconfigured:
+        return;
+    }
+
+    _kernel_asm = std::move(pooling_kernel_asm);
+}
+
+template <typename TypeInput, typename TypeOutput>
+void NEPoolingAssemblyWrapperKernel::create_arm_pooling_requant(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info, const CPUInfo &cpu_info)
+{
+    const arm_conv::pooling::PoolingType pool_type = (info.pool_type == PoolingType::AVG) ? arm_conv::pooling::PoolingType::AVERAGE : arm_conv::pooling::PoolingType::MAX;
+
+    arm_conv::pooling::PoolingWindow window{};
+    window.cols = static_cast<unsigned int>(info.pool_size.x());
+    window.rows = static_cast<unsigned int>(info.pool_size.y());
+
+    arm_conv::pooling::PoolingStride stride{};
+    std::tie(stride.cols, stride.rows) = info.pad_stride_info.stride();
+
+    const arm_conv::pooling::PaddingValues padding{ info.pad_stride_info.pad_left(), info.pad_stride_info.pad_top(), info.pad_stride_info.pad_right(), info.pad_stride_info.pad_bottom() };
+
+    constexpr unsigned int idx_width    = 1;
+    constexpr unsigned int idx_height   = 2;
+    constexpr unsigned int idx_channels = 0;
+    constexpr unsigned int idx_batches  = 3;
+
+    const unsigned int n_batches   = input->dimension(idx_batches);
+    const unsigned int input_rows  = input->dimension(idx_height);
+    const unsigned int input_cols  = input->dimension(idx_width);
+    const unsigned int n_channels  = input->dimension(idx_channels);
+    const unsigned int output_rows = output->dimension(idx_height);
+    const unsigned int output_cols = output->dimension(idx_width);
+
+    arm_conv::pooling::PoolingArgs args(&cpu_info, pool_type, window, stride, info.exclude_padding, n_batches, input_rows, input_cols, n_channels, output_rows, output_cols, padding, nullptr);
+
+    const auto input_qinfo  = input->quantization_info().uniform();
+    const auto output_qinfo = output->quantization_info().uniform();
+
+    const float multiplier = input_qinfo.scale / output_qinfo.scale;
+    int32_t     output_multiplier{};
+    int32_t     output_shift{};
+    quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
+
+    const arm_conv::pooling::Requantize32 requant_args(input_qinfo.offset,
+                                                       output_qinfo.offset,
+                                                       output_shift, // left shift
+                                                       0,            // right shift
+                                                       output_multiplier);
+
+    // Configure assembly pooling kernel with requantization
+    auto pooling_kernel_asm = arm_conv::pooling::pooling<TypeInput, TypeOutput, arm_conv::pooling::Requantize32>(args, requant_args);
+    if(pooling_kernel_asm == nullptr)
+    {
+        // Configuration not supported: Leave function unconfigured:
+        return;
+    }
+
+    _kernel_asm = std::move(pooling_kernel_asm);
+}
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.h b/src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.h
new file mode 100644
index 0000000..b2fa5b5
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_ASSEMBLY_POOLING_KERNEL_WRAPPER_KERNEL_H
+#define ARM_COMPUTE_ASSEMBLY_POOLING_KERNEL_WRAPPER_KERNEL_H
+
+#include "src/core/NEON/INEKernel.h"
+#include "src/core/NEON/kernels/assembly/pooling.hpp"
+
+#include "pool_common.hpp"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** This class is a wrapper for the assembly kernels.
+  *
+  * Some kernels were written in assembly and highly optimised for specific
+  * CPUs like A53 or A55. The arm compute library creates an instance of
+  * NEPoolingAssemblyWrapperKernel and other auxiliary data structures to
+  * execute a single assembly kernel in the context of an NEFunction.
+  *
+  */
+class NEPoolingAssemblyWrapperKernel final : public INEKernel
+{
+public:
+    /** Constructor
+     */
+    NEPoolingAssemblyWrapperKernel()                                  = default;
+    NEPoolingAssemblyWrapperKernel(NEPoolingAssemblyWrapperKernel &)  = delete;
+    NEPoolingAssemblyWrapperKernel(NEPoolingAssemblyWrapperKernel &&) = default;
+    NEPoolingAssemblyWrapperKernel &operator=(NEPoolingAssemblyWrapperKernel &) = delete;
+
+    const char *name() const override
+    {
+        return "NEPoolingAssemblyWrapperKernel";
+    }
+
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  input  Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out] output Output tensor to store the result of pooling. Data types supported: same as @p input.
+     * @param[in]  info   Pooling meta-data
+     */
+    void configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info, const CPUInfo &cpu_info);
+
+    /** Indicates whether or not this function can be used to process the given parameters.
+     *
+     * @param[in] input  Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] output Output tensor to store the result of pooling. Data types supported: same as @p input.
+     * @param[in] info   Pooling meta-data
+     *
+     * @return a status.
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+
+    /** Get size of the workspace needed by the assembly kernel.
+     *
+     * @param[in] num_threads Maximum number of threads that are going to be spawned.
+     *
+     * @return size of workspace
+     */
+    size_t get_working_size(unsigned int num_threads) const;
+
+    /** Was the asm kernel successfully configured?
+     *
+     * @return True if the asm kernel is configured and ready to run
+     */
+    bool is_configured() const;
+
+private:
+    /** Helper function to create the assembly kernel.
+     *
+     * @param[in] input  Input tensor info.
+     * @param[in] output Output tensor info.
+     * @param[in] info   Pooling layer meta-data.
+     */
+    template <typename TypeInput, typename TypeOutput>
+    void create_arm_pooling(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info, const CPUInfo &cpu_info);
+
+    /** Helper function to create the assembly kernel with requantization support
+     *
+     * @param[in] input  Input tensor info.
+     * @param[in] output Output tensor info.
+     * @param[in] info   Pooling layer meta-data.
+     */
+    template <typename TypeInput, typename TypeOutput>
+    void create_arm_pooling_requant(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info, const CPUInfo &cpu_info);
+
+    std::unique_ptr<arm_conv::pooling::IPoolingCommon> _kernel_asm{ nullptr };
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_ASSEMBLY_POOLING_KERNEL_WRAPPER_KERNEL_H */
diff --git a/src/core/NEON/kernels/assembly/arm_gemm_local.hpp b/src/core/NEON/kernels/assembly/arm_gemm_local.hpp
index 4715f25..c08ed2d 100644
--- a/src/core/NEON/kernels/assembly/arm_gemm_local.hpp
+++ b/src/core/NEON/kernels/assembly/arm_gemm_local.hpp
@@ -27,8 +27,5 @@
 
 #include "arm_compute/core/CPP/CPPTypes.h"
 
-namespace arm_gemm
-{
 using CPUModel = arm_compute::CPUModel;
 using CPUInfo  = arm_compute::CPUInfo;
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/pool_common.hpp b/src/core/NEON/kernels/assembly/pool_common.hpp
new file mode 100644
index 0000000..fdc18ae
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/pool_common.hpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#ifdef CYCLE_PROFILING
+#include "profiler.hpp"
+#endif // CYCLE_PROFILING
+
+namespace arm_conv
+{
+namespace pooling
+{
+enum class PoolingType
+{
+    AVERAGE, // Arithmetic mean over the pooling window
+    MAX,     // Maximum over the pooling window
+};
+
+enum class PoolingMethod
+{
+    DEFAULT,    // No preference; let the engine pick an implementation
+    DEPTHFIRST, // NOTE(review): presumably iterates channels innermost — confirm against kernel naming
+    PLANAR,     // NOTE(review): presumably processes one channel plane at a time — confirm
+};
+
+struct PoolingWindow
+{
+    unsigned int rows, cols; // Pooling window extent, in elements
+};
+
+struct PoolingStride
+{
+    unsigned int rows, cols; // Step between consecutive window positions, in elements
+};
+
+struct PaddingValues
+{
+    unsigned int left, top, right, bottom; // Padding applied to each edge of the input, in elements
+};
+
+class IPoolingCommon // Type-erased interface implemented by the assembly pooling engines
+{
+public:
+    virtual ~IPoolingCommon() = default;
+
+    // Determine the amount of working space required for the given thread count.
+    virtual size_t get_working_size(unsigned int num_threads) const = 0;
+
+    // Execute pooling over the specified area of memory; no geometry is passed
+    // here, so shapes/strides must be those the engine was created with.
+    virtual void execute(
+        const void *const input,
+        void *const       output,
+        void             *working_space,
+        unsigned int      thread_id,
+        unsigned int      num_threads) const = 0;
+
+    virtual void execute( // As above, but with caller-supplied leading dimensions (column/row/batch strides)
+        const void *const input,
+        size_t            ld_input_col,
+        size_t            ld_input_row,
+        size_t            ld_input_batch,
+        void *const       output,
+        size_t            ld_output_col,
+        size_t            ld_output_row,
+        size_t            ld_output_batch,
+        void             *working_space,
+        unsigned int      thread_id,
+        unsigned int      num_threads) const = 0;
+
+    virtual void execute( // Fully explicit variant: caller supplies shapes, strides and padding
+        unsigned int      batches,
+        unsigned int      height,
+        unsigned int      width,
+        unsigned int      channels,
+        const void *const input,
+        size_t            ld_input_col,
+        size_t            ld_input_row,
+        size_t            ld_input_batch,
+        const PaddingValues &,
+        unsigned int output_height,
+        unsigned int output_width,
+        void *const  output,
+        size_t       ld_output_col,
+        size_t       ld_output_row,
+        size_t       ld_output_batch,
+        void        *working_space,
+        unsigned int thread_id,
+        unsigned int num_threads) const = 0;
+};
+
+struct Nothing // Empty tag type used as the default OutputStage (no extra output transform)
+{
+};
+
+template <typename TInput, typename TOutput, class OutputStage = Nothing>
+class PoolingCommon : public IPoolingCommon // Typed base that concrete pooling engines derive from
+{
+};
+
+} // namespace pooling
+} // namespace arm_conv
diff --git a/src/core/NEON/kernels/assembly/pooling.hpp b/src/core/NEON/kernels/assembly/pooling.hpp
new file mode 100644
index 0000000..2325bd0
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/pooling.hpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "arm_gemm_local.hpp"
+#include "pool_common.hpp"
+
+#include <memory>
+
+namespace arm_conv
+{
+namespace pooling
+{
+struct PoolingConfig // Optional hints controlling which pooling implementation is selected
+{
+    PoolingMethod method = PoolingMethod::DEFAULT; // Preferred implementation strategy
+    std::string   filter = "";                     // NOTE(review): presumably a kernel-name filter for selection — confirm
+
+    PoolingConfig(PoolingMethod method)
+        : method(method) {};
+    PoolingConfig() {};
+};
+
+struct PoolingArgs // Aggregates everything needed to select and build a pooling engine
+{
+    const CPUInfo *cpu_info; // Target CPU description used for kernel selection
+
+    PoolingType   pool_type;
+    PoolingWindow pool_window;
+    PoolingStride pool_stride;
+    bool          exclude_padding; // NOTE(review): presumably excludes padded elements from the AVERAGE divisor — confirm
+
+    unsigned int n_batches, input_rows, input_cols, n_channels;
+    unsigned int output_rows, output_cols;
+
+    PaddingValues padding;
+
+    const PoolingConfig *config; // Optional; may be nullptr
+
+    PoolingArgs(
+        const CPUInfo       *cpu_info,
+        PoolingType          pool_type,
+        const PoolingWindow &window,
+        const PoolingStride &stride,
+        bool                 exclude_padding,
+        unsigned int         n_batches,
+        unsigned int         input_rows,
+        unsigned int         input_cols,
+        unsigned int         n_channels,
+        unsigned int         output_rows,
+        unsigned int         output_cols,
+        const PaddingValues &padding,
+        const PoolingConfig *cfg)
+        : cpu_info(cpu_info), pool_type(pool_type), pool_window(window), pool_stride(stride), exclude_padding(exclude_padding), n_batches(n_batches), input_rows(input_rows), input_cols(input_cols),
+          n_channels(n_channels), output_rows(output_rows), output_cols(output_cols), padding(padding), config(cfg)
+    {
+        // If either of the pooling window dimensions are set to zero, meaning
+        // "pool everything", then replace with the corresponding input dimension.
+        if(pool_window.rows == 0)
+        {
+            pool_window.rows = input_rows;
+        }
+        if(pool_window.cols == 0)
+        {
+            pool_window.cols = input_cols;
+        }
+    }
+};
+
+struct Requantize32 // Output stage parameters for quantized (asymmetric int8) pooling
+{
+    int32_t input_offset  = 0; // Quantization zero point of the input
+    int32_t output_offset = 0; // Quantization zero point of the output
+
+    int32_t per_layer_left_shift  = 0; // NOTE(review): presumably fixed-point requantization shifts/multiplier — confirm against kernel usage
+    int32_t per_layer_right_shift = 0;
+    int32_t per_layer_mul         = 0;
+
+    Requantize32(int32_t input_offset, int32_t output_offset,
+                 int32_t per_layer_left_shift, int32_t per_layer_right_shift,
+                 int32_t per_layer_mul)
+        : input_offset(input_offset), output_offset(output_offset), per_layer_left_shift(per_layer_left_shift), per_layer_right_shift(per_layer_right_shift), per_layer_mul(per_layer_mul)
+    {
+    }
+};
+
+template <typename TInput, typename TOutput, class OutputStage = Nothing>
+using UniquePoolingCommon = std::unique_ptr<PoolingCommon<TInput, TOutput, OutputStage>>;
+
+// Factory entry point: obtain a pooling engine matching the given arguments
+// (optionally with a requantization output stage, e.g. Requantize32).
+template <typename TInput, typename TOutput = TInput, class OutputStage = Nothing>
+UniquePoolingCommon<TInput, TOutput, OutputStage> pooling(const PoolingArgs &, const OutputStage & = {});
+
+} // namespace pooling
+} // namespace arm_conv
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
index 3cfb6e6..52ff7b3 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp
@@ -173,7 +173,7 @@
     /** Create a new Winograd convolution layer.
      */
     WinogradConvolutionLayer(
-      const arm_gemm::CPUInfo &cpuinfo,       /** Describes CPU properties. */
+      const CPUInfo &cpuinfo,       /** Describes CPU properties. */
       const int n_threads,          /** Maximum number of threads used to execute the convolution. */
       const int n_batches,          /** Number of batches in the input and output tensors. */
       const int n_input_channels,   /** Number of feature maps in a batch of the input tensor. */
diff --git a/src/runtime/NEON/INEOperator.cpp b/src/runtime/NEON/INEOperator.cpp
index a13b29b..ff643d1 100644
--- a/src/runtime/NEON/INEOperator.cpp
+++ b/src/runtime/NEON/INEOperator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -54,7 +54,7 @@
 
 MemoryRequirements INEOperator::workspace() const
 {
-    return {};
+    return _workspace; // Report the operator's actual memory requirements (previously always empty)
 }
 } // namespace experimental
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp
new file mode 100644
index 0000000..2600e2b
--- /dev/null
+++ b/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+NEPoolingAssemblyDispatch::~NEPoolingAssemblyDispatch() = default;
+
+void NEPoolingAssemblyDispatch::configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info)
+{
+    const CPUInfo     &ci          = NEScheduler::get().cpu_info();
+    const unsigned int num_threads = NEScheduler::get().num_threads();
+
+    // If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
+    if(!NEPoolingAssemblyDispatch::validate(input, output, info))
+    {
+        return;
+    }
+
+    auto pooling_wrapper = std::make_unique<NEPoolingAssemblyWrapperKernel>();
+    ARM_COMPUTE_ERROR_ON(pooling_wrapper == nullptr);
+    pooling_wrapper->configure(input, output, info, ci);
+
+    // Check if we have Global Pooling Layer
+    _is_global_pooling_layer = (input->dimension(2) == info.pool_size.width) && (input->dimension(1) == info.pool_size.height); // NOTE(review): dim 2 (height in NHWC) vs pool *width* and dim 1 vs *height* looks swapped; only affects the run() scheduling split — confirm
+
+    // Set workspace requirements
+    const unsigned int alignment = 4096; // 4096-byte alignment for the scratch buffer
+    _workspace.push_back(MemoryInfo(TensorType::ACL_DST_1, pooling_wrapper->get_working_size(num_threads), alignment));
+
+    _kernel = std::move(pooling_wrapper);
+}
+
+Status NEPoolingAssemblyDispatch::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info)
+{
+    return NEPoolingAssemblyWrapperKernel::validate(input, output, info); // Defer all checks to the wrapper kernel
+}
+
+bool NEPoolingAssemblyDispatch::is_configured() const
+{
+    return _kernel != nullptr; // configure() only sets _kernel when the assembly path is supported
+}
+
+void NEPoolingAssemblyDispatch::run(ITensorPack &tensors)
+{
+    if(tensors.empty())
+    {
+        ARM_COMPUTE_ERROR("No inputs provided");
+    }
+
+    if(_is_global_pooling_layer)
+    {
+        NEScheduler::get().schedule_op(_kernel.get(), Window::DimX, tensors); // Global pooling: split the work along X
+    }
+    else
+    {
+        NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, tensors); // Otherwise split along Y
+    }
+}
+} // namespace experimental
+
+struct NEPoolingAssemblyDispatch::Impl // PIMPL: non-owning tensor pointers plus the owned experimental operator
+{
+    const ITensor                                           *src{ nullptr };
+    ITensor                                                 *dst{ nullptr };
+    ITensor                                                 *workspace{ nullptr }; // Points at the function's _workspace tensor
+    std::unique_ptr<experimental::NEPoolingAssemblyDispatch> op{ nullptr };
+};
+
+NEPoolingAssemblyDispatch::NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&) = default;
+
+NEPoolingAssemblyDispatch &NEPoolingAssemblyDispatch::operator=(NEPoolingAssemblyDispatch &&) = default;
+
+NEPoolingAssemblyDispatch::~NEPoolingAssemblyDispatch() = default;
+
+NEPoolingAssemblyDispatch::NEPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
+    : _impl(std::make_unique<Impl>()),
+      _memory_group(std::move(memory_manager)),
+      _workspace()
+{
+}
+
+void NEPoolingAssemblyDispatch::configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    _impl->src       = input;
+    _impl->dst       = output;
+    _impl->workspace = &_workspace;
+
+    _impl->op = std::make_unique<experimental::NEPoolingAssemblyDispatch>();
+    _impl->op->configure(input->info(), output->info(), info);
+
+    const auto workspace = _impl->op->workspace().at(0); // NOTE(review): throws std::out_of_range if the inner configure() bailed out (no workspace entry pushed) — confirm callers validate() first
+    if(workspace.size > 0)
+    {
+        // Allocate workspace
+        allocate_workspace(workspace.size, workspace.alignment);
+    }
+}
+
+Status NEPoolingAssemblyDispatch::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info)
+{
+    return experimental::NEPoolingAssemblyDispatch::validate(input, output, info); // Delegate to the operator's checks
+}
+
+bool NEPoolingAssemblyDispatch::is_configured() const
+{
+    return _impl->op->is_configured(); // NOTE(review): dereferences _impl->op, which is null before configure() — confirm callers
+}
+
+void NEPoolingAssemblyDispatch::run()
+{
+    ITensorPack pack; // Bundle the tensors captured at configure() time for the operator interface
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_DST_0, _impl->dst);
+    pack.add_tensor(TensorType::ACL_DST_1, _impl->workspace);
+    _impl->op->run(pack);
+}
+
+void NEPoolingAssemblyDispatch::allocate_workspace(size_t workspace_size, size_t alignment)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
+    _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+    _memory_group.manage(&_workspace); // Hand lifetime management to the memory group
+    _workspace.allocator()->allocate();
+}
+} //namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h b/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h
new file mode 100644
index 0000000..f6d232b
--- /dev/null
+++ b/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H
+#define ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H
+
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/INEOperator.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "src/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+// Forward Declarations
+class ITensor;
+struct PoolingLayerInfo;
+
+/** Assembly kernel glue */
+class NEPoolingAssemblyDispatch : public IFunction
+{
+public:
+    /** Constructor */
+    NEPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPoolingAssemblyDispatch(const NEPoolingAssemblyDispatch &) = delete;
+    /** Default move constructor */
+    NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPoolingAssemblyDispatch &operator=(const NEPoolingAssemblyDispatch &) = delete;
+    /** Default move assignment operator */
+    NEPoolingAssemblyDispatch &operator=(NEPoolingAssemblyDispatch &&);
+    /** Destructor */
+    ~NEPoolingAssemblyDispatch();
+
+    /** If supported create an assembly routine, else fallback to Compute Library function.
+     *
+     * @param[in]  input  Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out] output Output tensor to store the result of pooling. Data types supported: same as @p input.
+     * @param[in]  info   Pooling meta-data
+     */
+    void configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &info);
+
+    /** Indicates whether or not this function can be used to process the given parameters.
+     *
+     * @param[in] input  Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] output Output tensor to store the result of pooling. Data types supported: same as @p input.
+     * @param[in] info   Pooling meta-data
+     *
+     * @return a status.
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);
+
+    /** Was the function successfully configured ?
+     *
+     * @return True if the function is configured and ready to run
+     */
+    bool is_configured() const;
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    /** Helper function to allocate memory for the workspace needed by the
+     * assembly kernels
+     *
+     * @param[in] workspace_size Total size of the workspace.
+     * @param[in] alignment      Alignment requirement in bytes.
+     */
+    void allocate_workspace(size_t workspace_size, size_t alignment);
+
+    struct Impl; // Opaque state (PIMPL); defined in the .cpp
+    std::unique_ptr<Impl> _impl;
+
+    MemoryGroup _memory_group{}; // Manages allocation/lifetime of _workspace
+    Tensor      _workspace{};    // Scratch tensor required by the assembly kernels
+};
+
+namespace experimental
+{
+/** Basic function to run pooling assembly kernels */
+class NEPoolingAssemblyDispatch : public INEOperator
+{
+public:
+    /** Constructor */
+    NEPoolingAssemblyDispatch() = default;
+    /** Prevent instances of this class from being copied */
+    NEPoolingAssemblyDispatch(const NEPoolingAssemblyDispatch &) = delete;
+    /** Default move constructor */
+    NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&) = default;
+    /** Prevent instances of this class from being copied */
+    NEPoolingAssemblyDispatch &operator=(const NEPoolingAssemblyDispatch &) = delete;
+    /** Default move assignment operator */
+    NEPoolingAssemblyDispatch &operator=(NEPoolingAssemblyDispatch &&) = default;
+    /** Destructor */
+    ~NEPoolingAssemblyDispatch();
+
+    /** If supported create an assembly routine, else fallback to Compute Library function.
+     *
+     * @param[in]  input  Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out] output Output tensor to store the result of pooling. Data types supported: same as @p input.
+     * @param[in]  info   Pooling meta-data
+     */
+    void configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info);
+
+    /** Indicates whether or not this function can be used to process the given parameters.
+     *
+     * @param[in] input  Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] output Output tensor to store the result of pooling. Data types supported: same as @p input.
+     * @param[in] info   Pooling meta-data
+     *
+     * @return a status.
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);
+    /** Was the function successfully configured ?
+     *
+     * @return True if the function is configured and ready to run
+     */
+    bool is_configured() const;
+    // Run method overridden
+    void run(ITensorPack &tensors) override;
+
+private:
+    bool _is_global_pooling_layer{ false }; // Selects the scheduling split dimension used in run()
+};
+} // namespace experimental
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H */
diff --git a/src/runtime/NEON/functions/NEPoolingLayer.cpp b/src/runtime/NEON/functions/NEPoolingLayer.cpp
index 887f00d..0c857b5 100644
--- a/src/runtime/NEON/functions/NEPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEPoolingLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,73 +27,99 @@
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "src/core/NEON/kernels/NEFillBorderKernel.h"
 #include "src/core/NEON/kernels/NEPoolingLayerKernel.h"
+#include "src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h"
 
 namespace arm_compute
 {
 NEPoolingLayer::~NEPoolingLayer() = default;
 
-NEPoolingLayer::NEPoolingLayer()
-    : _pooling_layer_kernel(), _border_handler(), _is_global_pooling_layer(false), _data_layout(DataLayout::NCHW)
+NEPoolingLayer::NEPoolingLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_manager(std::move(memory_manager)), _pooling_layer_kernel(), _border_handler(), _asm_glue(), _is_global_pooling_layer(false), _data_layout(DataLayout::NCHW)
 {
 }
 
 void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info, ITensor *indices)
 {
-    // Check if we have Global Pooling Layer
-    _is_global_pooling_layer = (input->info()->dimension(0) == pool_info.pool_size.width) && (input->info()->dimension(1) == pool_info.pool_size.height);
+    // Check if we can run assembly kernels. Currently, indices are not supported by those kernels
+    const bool run_optimised = bool(NEPoolingAssemblyDispatch::validate(input->info(), output->info(), pool_info)) && (indices == nullptr);
 
-    // Get data layout
-    _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout;
-
-    // Configure pooling kernel
-    _pooling_layer_kernel = std::make_unique<NEPoolingLayerKernel>();
-    _pooling_layer_kernel->configure(input, output, pool_info, indices);
-
-    switch(_data_layout)
+    if(run_optimised)
     {
-        case DataLayout::NCHW:
+        _asm_glue = std::make_unique<NEPoolingAssemblyDispatch>(_memory_manager);
+        _asm_glue->configure(input, output, pool_info);
+        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());
+    }
+    else
+    {
+        // Check if we have Global Pooling Layer
+        _is_global_pooling_layer = (input->info()->dimension(0) == pool_info.pool_size.width) && (input->info()->dimension(1) == pool_info.pool_size.height);
+
+        // Get data layout
+        _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout;
+
+        // Configure pooling kernel
+        _pooling_layer_kernel = std::make_unique<NEPoolingLayerKernel>();
+        _pooling_layer_kernel->configure(input, output, pool_info, indices);
+
+        switch(_data_layout)
         {
-            // Configure border depending on operation required (quantize border in case of asymmetric data_type)
-            BorderMode border_mode = (!indices && pool_info.pool_type == PoolingType::MAX) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
-            PixelValue zero_value((indices) ? std::numeric_limits<int>::min() : 0.f);
-            if(is_data_type_quantized_asymmetric(input->info()->data_type()) && !pool_info.exclude_padding)
+            case DataLayout::NCHW:
             {
-                zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
+                // Configure border depending on operation required (quantize border in case of asymmetric data_type)
+                BorderMode border_mode = (!indices && pool_info.pool_type == PoolingType::MAX) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
+                PixelValue zero_value((indices) ? std::numeric_limits<int>::min() : 0.f);
+                if(is_data_type_quantized_asymmetric(input->info()->data_type()) && !pool_info.exclude_padding)
+                {
+                    zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
+                }
+                _border_handler = std::make_unique<NEFillBorderKernel>();
+                _border_handler->configure(input, _pooling_layer_kernel->border_size(), border_mode, zero_value);
+                break;
             }
-            _border_handler = std::make_unique<NEFillBorderKernel>();
-            _border_handler->configure(input, _pooling_layer_kernel->border_size(), border_mode, zero_value);
-            break;
+            case DataLayout::NHWC:
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Data layout not supported");
         }
-        case DataLayout::NHWC:
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Data layout not supported");
     }
 }
 
 Status NEPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
 {
+    const bool run_optimised = bool(NEPoolingAssemblyDispatch::validate(input, output, pool_info)) && (indices == nullptr); // Assembly path cannot produce indices
+
+    if(run_optimised)
+    {
+        return Status{}; // Parameters fully handled by the assembly kernels
+    }
+
     return NEPoolingLayerKernel::validate(input, output, pool_info, indices);
 }
 
 void NEPoolingLayer::run()
 {
-    switch(_data_layout)
+    if(_asm_glue && _asm_glue->is_configured())
     {
-        case DataLayout::NCHW:
-            // Fill border
-            NEScheduler::get().schedule(_border_handler.get(), Window::DimY);
+        _asm_glue->run();
+    }
+    else
+    {
+        switch(_data_layout)
+        {
+            case DataLayout::NCHW:
+                // Fill border
+                NEScheduler::get().schedule(_border_handler.get(), Window::DimY);
 
-            // Run pooling layer
-            NEScheduler::get().schedule(_pooling_layer_kernel.get(), _is_global_pooling_layer ? Window::DimZ : Window::DimY);
-            break;
-        case DataLayout::NHWC:
-            // Run pooling layer
-            NEScheduler::get().schedule(_pooling_layer_kernel.get(), Window::DimX);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Data layout not supported");
+                // Run pooling layer
+                NEScheduler::get().schedule(_pooling_layer_kernel.get(), _is_global_pooling_layer ? Window::DimZ : Window::DimY);
+                break;
+            case DataLayout::NHWC:
+                // Run pooling layer
+                NEScheduler::get().schedule(_pooling_layer_kernel.get(), Window::DimX);
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Data layout not supported");
+        }
     }
 }
-
 } // namespace arm_compute