Update Neon™ pooling kernel

- Reduce duplication and simplify overall structure.
- Improve multi-threaded performance by sharing more data
  in lower-level caches.

Partially Resolves: COMPMID-5054
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: I5f4dc50913401d5c1cbfc10b866fae9490cbc4d7
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7404
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Andrew Mundy
Reviewed-by: Sheri Zhang <sheri.zhang@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/arm_conv/addressing.cpp b/src/core/NEON/kernels/arm_conv/addressing.cpp
new file mode 100644
index 0000000..d01627b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/addressing.cpp
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "addressing.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include <cstring>
+
+namespace arm_conv {
+namespace addressing {
+
+void fill_pointer_array(
+  size_t element_size,
+  void **dest_raw, const unsigned int array_rows, const unsigned int array_cols,
+  void *base_ptr_raw, size_t ld_row, size_t ld_col,
+  void *pad_buffer_raw,
+  const unsigned int pad_top, const unsigned int valid_rows,
+  const unsigned int pad_left, const unsigned int valid_cols
+)
+{
+  auto dest = reinterpret_cast<char **>(dest_raw);
+  auto base_ptr = reinterpret_cast<char *>(base_ptr_raw);
+  auto pad_buffer = reinterpret_cast<char *>(pad_buffer_raw);
+  ld_row *= element_size;
+  ld_col *= element_size;
+
+  const auto last_valid_row = std::min(pad_top + valid_rows, array_rows);
+  const auto last_valid_col = std::min(pad_left + valid_cols, array_cols);
+
+  unsigned int i = 0;
+  for (; i < pad_top; i++)
+  {
+    for (unsigned int j = 0; j < array_cols; j++)
+    {
+      *(dest++) = pad_buffer;
+    }
+  }
+  for (; i < last_valid_row; i++)
+  {
+    unsigned int j = 0;
+    auto colptr = base_ptr;
+    base_ptr += ld_row;
+
+    for (; j < pad_left; j++)
+    {
+      *(dest++) = pad_buffer;
+    }
+    for (; j < last_valid_col; j++)
+    {
+      *(dest++) = colptr;
+      colptr += ld_col;
+    }
+    for (; j < array_cols; j++)
+    {
+      *(dest++) = pad_buffer;
+    }
+  }
+  for (; i < array_rows; i++)
+  {
+    for (unsigned int j = 0; j < array_cols; j++)
+    {
+      *(dest++) = pad_buffer;
+    }
+  }
+}
+
+
+void fill_pointer_array_generic_kernel(
+  const size_t element_size,
+  void **dest_raw,
+  const unsigned int output_rows, const unsigned int output_cols,
+  const unsigned int kernel_rows, const unsigned int kernel_cols,
+  const unsigned int stride_rows, const unsigned int stride_cols,
+  void *base_ptr_raw, size_t ld_row, size_t ld_col,
+  void *pad_buffer_raw,
+  const unsigned int pad_top, const unsigned int valid_rows,
+  const unsigned int pad_left, const unsigned int valid_cols
+)
+{
+  auto dest = reinterpret_cast<char **>(dest_raw);
+  auto base_ptr = reinterpret_cast<char *>(base_ptr_raw);
+  auto pad_buffer = reinterpret_cast<char *>(pad_buffer_raw);
+  ld_row *= element_size;
+  ld_col *= element_size;
+
+  const auto last_valid_row = pad_top + valid_rows;
+  const auto last_valid_col = pad_left + valid_cols;
+  const auto point_stride = output_rows * output_cols;
+
+  // Iterate over the output points; after every point, increment the pointer
+  // into the address array.
+  for (unsigned int oi = 0; oi < output_rows; oi++)
+  {
+    for (unsigned int oj = 0; oj < output_cols; oj++)
+    {
+      auto point_dest = dest;
+      dest++;
+
+      // Iterate over kernel points and fill in the pointer array.
+      unsigned int ki = 0, ii = oi*stride_rows;
+      for (; ii < pad_top && ki < kernel_rows; ii++, ki++)
+      {
+        // Fill with padding
+        for (unsigned int j = 0; j < kernel_cols; j++)
+        {
+          *point_dest = pad_buffer;
+          point_dest += point_stride;
+        }
+      }
+      for (; ii < last_valid_row && ki < kernel_rows; ii++, ki++)
+      {
+        unsigned int kj = 0, ij = oj*stride_cols;
+        for (; ij < pad_left && kj < kernel_cols; ij++, kj++)
+        {
+          // Padding
+          *point_dest = pad_buffer;
+          point_dest += point_stride;
+        }
+        for (; ij < last_valid_col && kj < kernel_cols; ij++, kj++)
+        {
+          *point_dest = base_ptr + (ii - pad_top)*ld_row + (ij - pad_left)*ld_col;
+          point_dest += point_stride;
+        }
+        for (; kj < kernel_cols; kj++)
+        {
+          // Padding
+          *point_dest = pad_buffer;
+          point_dest += point_stride;
+        }
+      }
+      for (; ki < kernel_rows; ki++)
+      {
+        // Fill with padding
+        for (unsigned int j = 0; j < kernel_cols; j++)
+        {
+          *point_dest = pad_buffer;
+          point_dest += point_stride;
+        }
+      }
+    }
+  }
+}
+
+/* Patch array constructor
+ *
+ * Some depthwise kernels require an NCHW-ordered patch of input. Here we
+ * construct such a patch, and fill in an array of pointers to the rows of the
+ * patch.
+ */
+void fill_nchw_patch_array(
+  size_t element_size,
+  const void **dest_row_pointers_raw,  // Array of pointers to each row of the patch
+  void *dest_patch_raw,  // Pointer to space which can be used to construct the patch
+  const unsigned int patch_rows, unsigned int patch_cols,  // Patch size
+  const void *src_ptr_raw, size_t ld_row, size_t ld_col,  // Source tensor
+  const void *pad_row,  // Pointer to a row of padding values
+  const unsigned int pad_top, const unsigned int valid_rows,
+  const unsigned int pad_left, const unsigned int valid_cols
+)
+{
+  // Convert into more useful types
+  auto row_pointers = reinterpret_cast<const char **>(dest_row_pointers_raw);
+  auto dest_patch = reinterpret_cast<char *>(dest_patch_raw);
+  auto src = reinterpret_cast<const char *>(src_ptr_raw);
+  ld_row *= element_size;
+  ld_col *= element_size;
+
+  // Round up the patch columns to be a full quad
+  patch_cols = arm_gemm::roundup<unsigned int>(patch_cols, 16 / element_size);
+
+  const auto last_valid_row = std::min(pad_top + valid_rows, patch_rows);
+  const auto last_valid_col = std::min(pad_left + valid_cols, patch_cols);
+
+  // Construct the patch and row pointer array together
+  unsigned int i = 0;
+  for (; i < pad_top; i++)
+  {
+    // Insert pointers into the padding row
+    *(row_pointers++) = reinterpret_cast<const char *>(pad_row);
+  }
+  for (; i < last_valid_row; i++)
+  {
+    // Get a copy of the pointer for this row
+    auto colptr = src;
+    src += ld_row;
+
+    // If the input is already in NCHW format (ld_col == element_size) AND
+    // there is no padding, then we just use a pointer to the source tensor;
+    // otherwise we need to construct a patch and provide a pointer to it.
+    if (ld_col == element_size && pad_left == 0 && last_valid_col == patch_cols)
+    {
+      *(row_pointers++) = colptr;
+    }
+    else
+    {
+      auto patch_col = dest_patch;
+      *(row_pointers++) = dest_patch;
+      dest_patch += element_size * patch_cols;  // Move the patch pointer on
+
+      // Construct the patch; fill the entirety with padding and then copy in
+      // the valid elements.
+      memcpy(patch_col, pad_row, element_size * patch_cols);
+      patch_col += pad_left * element_size;  // Move over the left padding
+
+      if (ld_col == element_size)
+      {
+        // If the input is NCHW then copy across as many columns as we can.
+        memcpy(patch_col, colptr, (last_valid_col - pad_left) * element_size);
+      }
+      else
+      {
+        // If the input is NHWC then copy columns across in turn.
+        for (auto j = pad_left; j < last_valid_col; j++)
+        {
+          memcpy(patch_col, colptr, element_size);  // Copy the valid element
+          patch_col += element_size;  // Progress the patch destination
+          colptr += ld_col;  // Progress the patch source
+        }
+      }
+    }
+  }
+  for (; i < patch_rows; i++)
+  {
+    // Insert pointers into the padding row
+    *(row_pointers++) = reinterpret_cast<const char *>(pad_row);
+  }
+}
+
+
+/* Patch array constructor (generic kernels)
+ *
+ * Construct an array of pointers; one pointer for each output row for each
+ * kernel point. Pointers should point at a whole number of QUADS containing an
+ * input point for each output point. If the kernel column stride is 1 and the
+ * data is NCHW then the input tensor might be addressed directly; otherwise a
+ * new patch sample might need to be constructed.
+ */
+void fill_patch_array_generic_kernel(
+  size_t element_size,
+  const void **dest_pointers_raw,  // Pointers: one per output row per kernel point
+  void *patch_raw,  // Pointer to space which can be used to construct the patch
+  const unsigned int output_rows, const unsigned int output_cols,
+  const unsigned int kernel_rows, const unsigned int kernel_cols,
+  const unsigned int stride_rows, const unsigned int stride_cols,
+  const void *src_ptr_raw, size_t ld_row, size_t ld_col,  // Source tensor
+  const void *pad_row,  // Pointer to a row of padding values
+  const unsigned int pad_top, const unsigned int valid_rows,
+  const unsigned int pad_left, const unsigned int valid_cols
+)
+{
+  auto dest = reinterpret_cast<const char **>(dest_pointers_raw);
+  auto patch = reinterpret_cast<char *>(patch_raw);
+  auto src_ptr = reinterpret_cast<const char *>(src_ptr_raw);
+  ld_row *= element_size;
+  ld_col *= element_size;
+
+  // Round up the patch columns to a multiple of quad-length
+  const auto patch_cols = arm_gemm::roundup<unsigned int>(output_cols, 16 / element_size);
+
+  const auto input_rows = kernel_rows + (output_rows - 1) * stride_rows;
+  const auto last_valid_row = std::min(pad_top + valid_rows, input_rows);
+
+  const auto input_cols = kernel_cols + (output_cols - 1) * stride_cols;
+  const auto last_valid_col = std::min(pad_left + valid_cols, input_cols);
+
+  for (auto ki = 0u; ki < kernel_rows; ki++)
+  {
+    for (auto kj = 0u; kj < kernel_cols; kj++)
+    {
+      auto oi = 0u, ii = ki;
+      for (; oi < output_rows && ii < pad_top; oi++, ii += stride_rows)
+      {
+        // Insert a pointer to the padding row
+        *(dest++) = reinterpret_cast<const char *>(pad_row);
+      }
+      for (; oi < output_rows && ii < last_valid_row; oi++, ii += stride_rows)
+      {
+        auto rowptr = src_ptr + (ii - pad_top) * ld_row;
+
+        // Construct a sample of the input here
+        auto patch_pos = patch;
+        *(dest++) = patch;
+        patch += patch_cols * element_size;
+
+        // Fill with padding
+        memcpy(patch_pos, pad_row, patch_cols * element_size);
+
+        // Fill in the valid elements
+        auto oj = 0u, ij = kj;
+        for (; oj < patch_cols && ij < pad_left; oj++, ij += stride_cols)
+        {
+          // Do nothing for padding
+          patch_pos += element_size;
+        }
+        for (; oj < patch_cols && ij < last_valid_col; oj++, ij += stride_cols)
+        {
+          // Copy from the source tensor
+          memcpy(patch_pos, rowptr + (ij - pad_left)*ld_col, element_size);
+          patch_pos += element_size;
+        }
+        // No action required for right-hand padding
+      }
+      for (; oi < output_rows; oi++)
+      {
+        *(dest++) = reinterpret_cast<const char *>(pad_row);
+      }
+    }
+  }
+}
+
+}  // namespace addressing
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/addressing.hpp b/src/core/NEON/kernels/arm_conv/addressing.hpp
new file mode 100644
index 0000000..35715a3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/addressing.hpp
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* arm_conv kernels share a lot of similarities in how they address input and
+ * output tensors. Consequently, this file contains common approaches to
+ * preparing these tensor descriptions. Generic (i.e., untyped) methods are
+ * contained within the `arm_conv::addressing` namespace, and typed wrappers
+ * are provided within an anonymous namespace within `arm_conv`. The various
+ * methods are described below.
+ */
+
+#include <cstddef>
+
+namespace arm_conv {
+namespace addressing {
+
+/* Pointer array
+ * -------------
+ *
+ * Constructs an array of pointers which point to an `array_rows` x `array_cols`
+ * chunk of a tensor. The array of pointers will be written into `dest`.
+ *
+ * `base_ptr` should point at the first VALID element of the chunk of tensor
+ * (i.e., if there's one padded row, and one padded column, then `base_ptr`
+ * should point at the element which will be at position (1, 1) in the array).
+ * `ld_row` and `ld_col` are in bytes, and describe the strides over rows and
+ * columns (respectively) of the NHWC-ordered tensor. `pad_buffer` should point
+ * at a suitably sized (and initialised) area of memory which can be addressed
+ * by elements of the array which represent padding.
+ *
+ * `pad_top` and `pad_left` describe the padding on the top and left of the
+ * array, respectively, and `valid_rows` and `valid_cols` describe the number
+ * of rows and columns between the element pointed to by `base_ptr` and the
+ * edge of the image (that is `valid_rows` may be greater than `array_rows` and
+ * likewise for the columns).
+ */
+void fill_pointer_array(
+  size_t element_size,
+  void **dest, unsigned int array_rows, unsigned int array_cols,
+  void *base_ptr, size_t ld_row, size_t ld_col,
+  void *pad_buffer,
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+);
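+
+/* As an illustration (example sizes only): with a 3 x 3 pointer array,
+ * pad_top = 1, pad_left = 1 and at least two valid rows and columns, the
+ * array written to `dest` reads, row-major:
+ *
+ *   pad_buffer   pad_buffer          pad_buffer
+ *   pad_buffer   base_ptr            base_ptr + ld_col
+ *   pad_buffer   base_ptr + ld_row   base_ptr + ld_row + ld_col
+ *
+ * with `ld_row` and `ld_col` scaled to bytes by `element_size`, as in the
+ * implementation.
+ */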
+
+/* Interleaved multi-point pointer array
+ * -------------------------------------
+ *
+ * For each point in a `output_rows` x `output_cols` array, constructs
+ * `kernel_rows` x `kernel_cols` array of pointers. The pointers are
+ * interleaved as follows:
+ *
+ *   for ki in kernel_rows:
+ *       for kj in kernel_cols:
+ *           for oi in output_rows:
+ *               for oj in output_cols:
+ *                   get pointer for point (oi*stride_rows + ki, oj*stride_cols + kj)
+ *
+ * Other arguments are as for `fill_pointer_array`.
+ *
+ * The name reflects that this is the form of addressing mode used by "generic"
+ * depthwise and pooling kernels.
+ */
+void fill_pointer_array_generic_kernel(
+  size_t element_size,
+  void **dest,
+  unsigned int output_rows, unsigned int output_cols,
+  unsigned int kernel_rows, unsigned int kernel_cols,
+  unsigned int stride_rows, unsigned int stride_cols,
+  void *base_ptr, size_t ld_row, size_t ld_col,
+  void *pad_buffer,
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+);
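+
+/* Reading off the loop nest above: the pointer for kernel point (ki, kj) and
+ * output point (oi, oj) lands at
+ *
+ *   dest[((ki*kernel_cols + kj)*output_rows + oi)*output_cols + oj]
+ */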
+
+/* NCHW-patch addressed by row
+ * ---------------------------
+ *
+ * Construct an array of pointers, each of which points at a row of an
+ * NCHW-ordered patch of a tensor. Memory addressed by the pointers may be
+ * outside of the original tensor, and should therefore not be written to
+ * (modifications will be lost).
+ *
+ * `dest_row_pointers` should point at an array of `patch_rows` pointers, each
+ * of which will point at a 1 x `patch_cols` NCHW-ordered sample of the source
+ * tensor.
+ *
+ * `dest_patch` should point to an `element_size * patch_rows * patch_cols` area
+ * of memory which can be written to by this function to form samples of the
+ * source tensor.
+ *
+ * `src_ptr` should point at the first VALID element of the chunk of tensor
+ * (i.e., if there's one padded row, and one padded column, then `src_ptr`
+ * should point at the element which will be at position (1, 1) in the array).
+ * `ld_row` and `ld_col` are in bytes, and describe the strides over rows and
+ * columns (respectively) of the NHWC-ordered tensor. If `ld_col` ==
+ * `element_size` then copies from the source tensor will be elided and source
+ * data may be addressed directly.
+ *
+ * `pad_row` should point to an array of `patch_cols` (appropriately
+ * initialised) padding values.
+ *
+ * Other arguments are as for `fill_pointer_array`.
+ */
+void fill_nchw_patch_array(
+  size_t element_size,
+  const void **dest_row_pointers,  // Array of pointers to each row of the patch
+  void *dest_patch,  // Pointer to space which can be used to construct the patch
+  unsigned int patch_rows, unsigned int patch_cols,  // Patch size
+  const void *src_ptr, size_t ld_row, size_t ld_col,  // Source tensor
+  const void *pad_row,  // Pointer to a row of padding values
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+);
+
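+/* Patch array (generic kernels)
+ * -----------------------------
+ *
+ * As `fill_nchw_patch_array`, but constructs one pointer (and, where needed,
+ * one patch row) per output row per kernel point; see the comment on the
+ * implementation in addressing.cpp. Other arguments are as for
+ * `fill_nchw_patch_array`.
+ */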
+void fill_patch_array_generic_kernel(
+  size_t element_size,
+  const void **dest_pointers,  // Pointers: one per output row per kernel point
+  void *dest_patch,  // Pointer to space which can be used to construct the patch
+  unsigned int output_rows, unsigned int output_cols,
+  unsigned int kernel_rows, unsigned int kernel_cols,
+  unsigned int stride_rows, unsigned int stride_cols,
+  const void *src_ptr, size_t ld_row, size_t ld_col,  // Source tensor
+  const void *pad_row,  // Pointer to a row of padding values
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+);
+
+}  // namespace addressing
+
+namespace {
+
+/* Pointer array
+ * -------------
+ *
+ * See `addressing::fill_pointer_array`. No copies are made by this method; the
+ * memory pointed to by the pointer array is contained within the base tensor
+ * and the padding buffer.
+ */
+template <typename T>
+inline void fill_pointer_array(
+  T **dest, unsigned int array_rows, unsigned int array_cols,
+  T *base_ptr, size_t ld_row, size_t ld_col,
+  T *pad_buffer,
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+)
+{
+  addressing::fill_pointer_array(
+    sizeof(T), (void **) dest, array_rows, array_cols,
+    (void *) base_ptr, ld_row, ld_col,
+    (void *) pad_buffer,
+    pad_top, valid_rows,
+    pad_left, valid_cols
+  );
+}
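+
+// A minimal usage sketch (hypothetical names and sizes, for illustration only):
+//
+//   float *ptrs[4 * 4];
+//   fill_pointer_array<float>(
+//     ptrs, 4, 4,               // 4 x 4 pointer array
+//     input, ld_row, ld_col,    // first valid input element and its strides
+//     pad_values,               // suitably initialised padding buffer
+//     1, valid_rows,            // one row of top padding
+//     1, valid_cols             // one column of left padding
+//   );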
+
+
+/* Interleaved multi-point pointer array
+ * -------------------------------------
+ *
+ * See `addressing::fill_pointer_array_generic_kernel`. No copies are made by
+ * this method; the memory pointed to by the pointer array is contained within
+ * the base tensor and the padding buffer.
+ */
+template <typename T>
+inline void fill_pointer_array_generic_kernel(
+  T **dest,
+  unsigned int output_rows, unsigned int output_cols,
+  unsigned int kernel_rows, unsigned int kernel_cols,
+  unsigned int stride_rows, unsigned int stride_cols,
+  T *base_ptr, size_t ld_row, size_t ld_col,
+  T *pad_buffer,
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+)
+{
+  addressing::fill_pointer_array_generic_kernel(
+    sizeof(T),
+    (void **) dest,
+    output_rows, output_cols,
+    kernel_rows, kernel_cols,
+    stride_rows, stride_cols,
+    (void *) base_ptr, ld_row, ld_col,
+    (void *) pad_buffer,
+    pad_top, valid_rows,
+    pad_left, valid_cols
+  );
+}
+
+template <typename T>
+inline void fill_nchw_patch_array(
+  const T **dest_row_pointers,  // Array of pointers to each row of the patch
+  T *dest_patch,  // Pointer to space which can be used to construct the patch
+  unsigned int patch_rows, unsigned int patch_cols,  // Patch size
+  const T *src_ptr, size_t ld_row, size_t ld_col,  // Source tensor
+  const T *pad_row,  // Pointer to a row of padding values
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+)
+{
+  addressing::fill_nchw_patch_array(
+    sizeof(T),
+    reinterpret_cast<const void **>(dest_row_pointers),
+    reinterpret_cast<void *>(dest_patch),
+    patch_rows, patch_cols,
+    reinterpret_cast<const void *>(src_ptr), ld_row, ld_col,
+    reinterpret_cast<const void *>(pad_row),
+    pad_top, valid_rows,
+    pad_left, valid_cols
+  );
+}
+
+template <typename T>
+inline void fill_patch_array_generic_kernel(
+  const T **dest_pointers,  // Pointers: one per output row per kernel point
+  T *dest_patch,  // Pointer to space which can be used to construct the patch
+  unsigned int output_rows, unsigned int output_cols,
+  unsigned int kernel_rows, unsigned int kernel_cols,
+  unsigned int stride_rows, unsigned int stride_cols,
+  const T *src_ptr, size_t ld_row, size_t ld_col,  // Source tensor
+  const T *pad_row,  // Pointer to a row of padding values
+  unsigned int pad_top, unsigned int valid_rows,
+  unsigned int pad_left, unsigned int valid_cols
+)
+{
+  addressing::fill_patch_array_generic_kernel(
+    sizeof(T),
+    reinterpret_cast<const void **>(dest_pointers),
+    reinterpret_cast<void *>(dest_patch),
+    output_rows, output_cols,
+    kernel_rows, kernel_cols,
+    stride_rows, stride_cols,
+    reinterpret_cast<const void *>(src_ptr), ld_row, ld_col,
+    reinterpret_cast<const void *>(pad_row),
+    pad_top, valid_rows,
+    pad_left, valid_cols
+  );
+}
+
+}  // namespace {anonymous}
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp b/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp
new file mode 100644
index 0000000..8473fc0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "pooling.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+namespace arm_conv {
+namespace pooling {
+
+class IDepthfirstStrategy
+{
+  public:
+  virtual ~IDepthfirstStrategy() = default;
+
+  virtual unsigned int get_input_rows() const = 0;
+  virtual unsigned int get_input_cols() const = 0;
+
+  virtual unsigned int get_output_rows() const = 0;
+  virtual unsigned int get_output_cols() const = 0;
+};
+
+
+template <typename T>
+struct TensorSpec
+{
+  T base;
+  size_t ld_row, ld_col;
+
+  TensorSpec(T ptr, size_t ld_row, size_t ld_col)
+  : base(ptr), ld_row(ld_row), ld_col(ld_col) {}
+};
+
+
+template <typename TInput, typename TOutput>
+class DepthfirstDriver : public PoolingCommon<TInput, TOutput>
+{
+  protected:
+  using Parent = PoolingCommon<TInput, TOutput>;
+
+  // The strategy which we're applying to solve the pooling problem.
+  std::unique_ptr<const IDepthfirstStrategy> m_strat;
+
+  /* Compute the amount of working space required for a single thread. */
+  virtual size_t get_working_size_per_thread(unsigned int n_input_channels) const = 0;
+
+  /* Initialise the working space for a thread. */
+  virtual void initialise_working_space(void *, unsigned int n_input_channels) const = 0;
+
+  /* Compute a portion of the output tensor with padding. */
+  virtual void compute_tile_padded(
+    unsigned int output_i, unsigned int output_j,
+    unsigned int output_channel_start, unsigned int output_channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *working_space
+  ) const = 0;
+
+  /* Compute a portion of the work with only top/bottom padding.
+   *
+   * The default implementation of this repeatedly calls into the padded tile
+   * variant.
+   */
+  virtual void compute_row_padded_tile_row(
+    const unsigned int output_i, unsigned int output_j, unsigned int n_tile_cols,
+    const unsigned int output_channel_start, const unsigned int output_channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *working_space
+  ) const
+  {
+    for (; n_tile_cols; n_tile_cols--, output_j += m_strat->get_output_cols())
+    {
+      this->compute_tile_padded(
+        output_i, output_j, output_channel_start, output_channel_end,
+        input, output, working_space
+      );
+    }
+  }
+
+  /* Compute a portion of the output tensor with no padding.
+   *
+   * The default implementation of this repeatedly calls into the padded
+   * variant.
+   */
+  virtual void compute_tiles_unpadded(
+    unsigned int start_output_i, unsigned int start_output_j,
+    unsigned int n_tile_rows, unsigned int n_tile_cols,
+    unsigned int output_channel_start, unsigned int output_channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *working_space
+  ) const
+  {
+    for (unsigned int tile_i = 0; tile_i < n_tile_rows; tile_i++)
+    {
+      this->compute_row_padded_tile_row(
+        start_output_i, start_output_j, n_tile_cols,
+        output_channel_start, output_channel_end,
+        input, output, working_space
+      );
+      start_output_i += m_strat->get_output_rows();
+    }
+  }
+
+  void execute_internal(
+    unsigned int n_batches,
+    unsigned int input_height,
+    unsigned int input_width,
+    unsigned int n_channels,
+    const PaddingValues &padding,
+    const void *input,
+    size_t ld_input_col,
+    size_t ld_input_row,
+    size_t ld_input_batch,
+    unsigned int output_height,
+    unsigned int output_width,
+    void *output,
+    size_t ld_output_col,
+    size_t ld_output_row,
+    size_t ld_output_batch,
+    void *working_space,
+    unsigned int thread_id,
+    unsigned int n_threads
+  ) const override
+  {
+    // Get and initialise the working space for this thread.
+    void *thread_working_space =
+      static_cast<uint8_t *>(working_space) + thread_id * this->get_working_size_per_thread(n_channels);
+    this->initialise_working_space(thread_working_space, n_channels);
+
+    // Construct convenient representations of the input/output tensors.
+    TensorSpec<const TInput *> input_tensor(reinterpret_cast<const TInput *>(input), ld_input_row, ld_input_col);
+    TensorSpec<TOutput *> output_tensor(reinterpret_cast<TOutput *>(output), ld_output_row, ld_output_col);
+
+    // If the output is a 1x1 tensor, which commonly occurs at the end of a
+    // network, then we change the threading strategy to parallelise over
+    // channels rather than rows of the tensor.
+    if (n_threads > 1 && output_height == 1 && output_width == 1)
+    {
+      // Determine how many channels should be assigned to each thread; we
+      // round up first to ensure a reasonable spread across the threads.
+      const auto channels_per_thread = arm_gemm::roundup(arm_gemm::roundup(n_channels, 16u), n_threads) / n_threads;
+      const auto start_channel = thread_id * channels_per_thread;
+      const auto end_channel = std::min(start_channel + channels_per_thread, n_channels);
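+      // For example (illustrative numbers): with 40 channels and 4 threads,
+      // roundup(40, 16) = 48 and roundup(48, 4) / 4 = 12 channels per thread,
+      // so threads are given channels [0, 12), [12, 24), [24, 36) and [36, 40).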
+
+      if (start_channel >= end_channel)
+      {
+        // This thread should move on if we have insufficient work to do.
+        return;
+      }
+
+      for (; n_batches; n_batches--)
+      {
+        // We know we don't need to iterate over rows or columns here, so just
+        // execute the tile.
+        this->compute_tile_padded(
+          0, 0,  // Compute the only output point
+          start_channel, end_channel,
+          input_tensor, output_tensor, thread_working_space
+        );
+
+        // Progress the pointers for the next batch.
+        input_tensor.base += ld_input_batch;
+        output_tensor.base += ld_output_batch;
+      }
+
+      // Exit here, since we've done all the work using the different strategy.
+      return;
+    }
+
+    for (unsigned int batch = 0; batch < n_batches; batch++)
+    {
+      // Iterate over rows of the output tensor; we stripe over the tiles.
+      for (unsigned int start_output_i = thread_id * m_strat->get_output_rows();
+           start_output_i < output_height;
+           start_output_i += n_threads * m_strat->get_output_rows())
+      {
+        // Determine what padding (if any) is required on the top/bottom of
+        // this row of output tiles.
+        const auto end_output_i = start_output_i + m_strat->get_output_rows();
+        const bool pad_output_bottom = output_height < end_output_i;
+
+        const int start_input_i = start_output_i * this->m_args.pool_stride.rows - padding.top;
+        const bool pad_input_top = start_input_i < 0;
+        const int end_input_i = start_input_i + m_strat->get_input_rows();
+        const bool pad_input_bottom = static_cast<int>(input_height) < end_input_i;
+        const bool pad_row = pad_input_top || pad_input_bottom || pad_output_bottom;
+
+        // Iterate over the columns of the output tensor; we attempt to grab as
+        // much as possible of the unpadded regions, so the loop structure is a
+        // bit odd.
+        unsigned int start_output_j = 0;
+        while (start_output_j < output_width)
+        {
+          const int start_in_j = start_output_j * this->m_args.pool_stride.cols - padding.left;
+          const bool pad_input_left = start_in_j < 0;
+
+          // Determine if we can process a number of unpadded tiles in one go.
+          int n_unpadded_tiles = 0;
+          if (!pad_input_left)
+          {
+            // Determine the maximum number of tiles we could handle.
+            n_unpadded_tiles = (output_width - start_output_j) / m_strat->get_output_cols();
+
+            // Handle padding on the right hand edge
+            const int tile_stride = m_strat->get_output_cols() * this->m_args.pool_stride.cols;
+            int end_output_j = start_output_j + n_unpadded_tiles * m_strat->get_output_cols();
+            int end_input_j = start_in_j + m_strat->get_input_cols() + (n_unpadded_tiles - 1)*tile_stride;
+
+            while (n_unpadded_tiles > 0 &&
+                   (static_cast<int>(output_width) < end_output_j ||
+                    static_cast<int>(input_width) < end_input_j))
+            {
+              n_unpadded_tiles--;
+              end_output_j -= m_strat->get_output_cols();
+              end_input_j -= tile_stride;
+            }
+          }
+
+          // Process unpadded tiles if possible; otherwise process a padded tile.
+          if (n_unpadded_tiles)
+          {
+            if (!pad_row)
+            {
+              // Completely unpadded execution
+              this->compute_tiles_unpadded(
+                start_output_i, start_output_j,
+                1, n_unpadded_tiles,  // Compute a row of unpadded tiles
+                0, n_channels,  // Compute all channels
+                input_tensor, output_tensor, thread_working_space
+              );
+            }
+            else
+            {
+              // Top/bottom padding only
+              this->compute_row_padded_tile_row(
+                start_output_i, start_output_j, n_unpadded_tiles,
+                0, n_channels,  // Compute all channels
+                input_tensor, output_tensor, thread_working_space
+              );
+            }
+            start_output_j += n_unpadded_tiles * m_strat->get_output_cols();
+          }
+          else
+          {
+            this->compute_tile_padded(
+              start_output_i, start_output_j,
+              0, n_channels,  // Compute all channels
+              input_tensor, output_tensor, thread_working_space
+            );
+            start_output_j += m_strat->get_output_cols();
+          }
+        }
+      }
+
+      // Progress the pointers for the next batch.
+      input_tensor.base += ld_input_batch;
+      output_tensor.base += ld_output_batch;
+    }
+  }
+
+  public:
+  DepthfirstDriver(const IDepthfirstStrategy *strategy, const PoolingArgs &args)
+  : Parent(args), m_strat(strategy)
+  {
+  }
+
+  size_t get_working_size(unsigned int n_threads) const override
+  {
+    return this->get_working_size(n_threads, this->m_args.n_channels);
+  }
+
+  size_t get_working_size(unsigned int n_threads, unsigned int n_channels) const override final
+  {
+    return n_threads * this->get_working_size_per_thread(n_channels);
+  }
+};
+
+}  // namespace pooling
+}  // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
index 178db4a..a670bb8 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst
+struct a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst : public DepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
+  using Parent = DepthfirstStrategy<__fp16, __fp16>;
 
-  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::AVERAGE;
+  const static auto pool_rows = 3u, pool_cols = 3u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+  a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 3; }
-  constexpr static unsigned int pool_cols(void) { return 3; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
-
-  a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 89dbf5c..4d71f94 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,7 +26,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
 
 namespace arm_conv {
 namespace pooling {
@@ -83,12 +83,12 @@
 
   __asm__ __volatile__(
     "ldr x4, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x5, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x6, #0x0\n"
-    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
     "cmp x4, #0x8\n"
-    "ldp x7, x8, [x20, #0x0]\n"
+    "mov x5, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x6, x7, [x20, #0x0]\n"
+    "mov x8, #0x0\n"
     "ldp x17, x16, [x20, #0x10]\n"
     "ldp x15, x14, [x19, #0x0]\n"
     "ldp x13, x12, [x19, #0x10]\n"
@@ -98,158 +98,157 @@
     "ldp x25, x24, [x19, #0x50]\n"
     "ldp x23, x22, [x19, #0x60]\n"
     "ldp x21, x20, [x19, #0x70]\n"
-    "ldr d8, [%x[args], %[offsetof_rescale]]\n"
+    "ldr d7, [%x[args], %[offsetof_rescale]]\n"
     "blt 3f\n"
-    "ldr q7, [x10, x5]\n"
     "lsr x19, x4, #0x3\n"
-    "ldr q6, [x9, x5]\n"
     "sub x4, x4, x19, LSL #3\n"
-    "ldr q5, [x26, x5]\n"
+    "ldr q6, [x10, x5]\n"
+    "ldr q5, [x9, x5]\n"
     "subs x19, x19, #0x1\n"
-    "ldr q4, [x25, x5]\n"
-    "ldr q3, [x14, x5]\n"
-    "ldr q2, [x13, x5]\n"
-    "ldr q1, [x11, x5]\n"
-    "ldr q0, [x27, x5]\n"
-    "ldr q31, [x28, x5]\n"
-    "ldr q30, [x24, x5]\n"
-    "ldr q29, [x22, x5]\n"
-    "ldr q28, [x21, x5]\n"
-    "ldr q27, [x15, x5]\n"
-    "ldr q26, [x12, x5]\n"
-    "ldr q25, [x23, x5]\n"
-    "ldr q24, [x20, x5]\n"
+    "ldr q4, [x26, x5]\n"
+    "ldr q3, [x25, x5]\n"
+    "ldr q2, [x14, x5]\n"
+    "ldr q1, [x13, x5]\n"
+    "ldr q0, [x11, x5]\n"
+    "ldr q31, [x27, x5]\n"
+    "ldr q30, [x28, x5]\n"
+    "ldr q29, [x24, x5]\n"
+    "ldr q28, [x22, x5]\n"
+    "ldr q27, [x21, x5]\n"
+    "ldr q26, [x15, x5]\n"
+    "ldr q25, [x12, x5]\n"
+    "ldr q24, [x23, x5]\n"
+    "ldr q23, [x20, x5]\n"
     "add x5, x5, #0x10\n"
     "beq 2f\n"
     "1:"  // Vector: Loop
-    "fadd v17.8h, v7.8h, v6.8h\n"
-    "ldr q7, [x10, x5]\n"
+    "fadd v17.8h, v6.8h, v5.8h\n"
+    "fadd v16.8h, v4.8h, v3.8h\n"
     "subs x19, x19, #0x1\n"
-    "fadd v16.8h, v5.8h, v4.8h\n"
-    "ldr q6, [x9, x5]\n"
-    "fadd v18.8h, v3.8h, v2.8h\n"
-    "ldr q5, [x26, x5]\n"
-    "fadd v23.8h, v1.8h, v0.8h\n"
-    "ldr q4, [x25, x5]\n"
-    "fadd v22.8h, v31.8h, v30.8h\n"
-    "ldr q3, [x14, x5]\n"
-    "fadd v17.8h, v17.8h, v16.8h\n"
-    "ldr q2, [x13, x5]\n"
-    "fadd v16.8h, v29.8h, v28.8h\n"
-    "ldr q1, [x11, x5]\n"
-    "fadd v19.8h, v27.8h, v23.8h\n"
-    "ldr q0, [x27, x5]\n"
-    "fadd v21.8h, v18.8h, v17.8h\n"
-    "ldr q31, [x28, x5]\n"
-    "fadd v20.8h, v16.8h, v17.8h\n"
-    "ldr q30, [x24, x5]\n"
-    "fadd v18.8h, v26.8h, v22.8h\n"
-    "ldr q29, [x22, x5]\n"
-    "fadd v17.8h, v25.8h, v23.8h\n"
-    "ldr q28, [x21, x5]\n"
-    "fadd v16.8h, v24.8h, v22.8h\n"
-    "ldr q27, [x15, x5]\n"
-    "fadd v19.8h, v21.8h, v19.8h\n"
-    "ldr q26, [x12, x5]\n"
-    "fadd v18.8h, v21.8h, v18.8h\n"
-    "ldr q25, [x23, x5]\n"
+    "ldr q6, [x10, x5]\n"
+    "fadd v19.8h, v17.8h, v16.8h\n"
+    "fadd v18.8h, v2.8h, v1.8h\n"
+    "ldr q5, [x9, x5]\n"
+    "ldr q4, [x26, x5]\n"
+    "fadd v17.8h, v0.8h, v31.8h\n"
+    "fadd v22.8h, v30.8h, v29.8h\n"
+    "ldr q3, [x25, x5]\n"
+    "ldr q2, [x14, x5]\n"
+    "fadd v16.8h, v28.8h, v27.8h\n"
+    "fadd v21.8h, v18.8h, v19.8h\n"
+    "ldr q1, [x13, x5]\n"
+    "ldr q0, [x11, x5]\n"
+    "fadd v20.8h, v16.8h, v19.8h\n"
+    "fadd v19.8h, v26.8h, v17.8h\n"
+    "ldr q31, [x27, x5]\n"
+    "ldr q30, [x28, x5]\n"
+    "fadd v18.8h, v25.8h, v22.8h\n"
+    "fadd v17.8h, v24.8h, v17.8h\n"
+    "ldr q29, [x24, x5]\n"
+    "ldr q28, [x22, x5]\n"
+    "fadd v16.8h, v23.8h, v22.8h\n"
+    "fadd v19.8h, v19.8h, v21.8h\n"
+    "ldr q27, [x21, x5]\n"
+    "ldr q26, [x15, x5]\n"
+    "fadd v18.8h, v18.8h, v21.8h\n"
     "fadd v17.8h, v17.8h, v20.8h\n"
-    "ldr q24, [x20, x5]\n"
+    "ldr q25, [x12, x5]\n"
+    "ldr q24, [x23, x5]\n"
+    "fadd v16.8h, v16.8h, v20.8h\n"
+    "fmul v19.8h, v19.8h, v7.h[0]\n"
+    "ldr q23, [x20, x5]\n"
     "add x5, x5, #0x10\n"
-    "fadd v16.8h, v20.8h, v16.8h\n"
-    "fmul v19.8h, v19.8h, v8.h[0]\n"
-    "str q19, [x7, x6]\n"
-    "fmul v18.8h, v18.8h, v8.h[1]\n"
-    "fmul v17.8h, v17.8h, v8.h[2]\n"
-    "str q18, [x8, x6]\n"
-    "fmul v16.8h, v16.8h, v8.h[3]\n"
-    "str q17, [x17, x6]\n"
-    "str q16, [x16, x6]\n"
-    "add x6, x6, #0x10\n"
+    "fmul v18.8h, v18.8h, v7.h[1]\n"
+    "fmul v17.8h, v17.8h, v7.h[2]\n"
+    "str q19, [x6, x8]\n"
+    "fmul v16.8h, v16.8h, v7.h[3]\n"
+    "str q18, [x7, x8]\n"
+    "str q17, [x17, x8]\n"
+    "str q16, [x16, x8]\n"
+    "add x8, x8, #0x10\n"
     "bgt 1b\n"
     "2:"  // Vector: Tail
-    "fadd v17.8h, v7.8h, v6.8h\n"
-    "fadd v16.8h, v5.8h, v4.8h\n"
-    "fadd v18.8h, v3.8h, v2.8h\n"
-    "fadd v23.8h, v1.8h, v0.8h\n"
-    "fadd v17.8h, v17.8h, v16.8h\n"
-    "fadd v22.8h, v31.8h, v30.8h\n"
-    "fadd v16.8h, v29.8h, v28.8h\n"
-    "fadd v21.8h, v18.8h, v17.8h\n"
-    "fadd v19.8h, v27.8h, v23.8h\n"
-    "fadd v20.8h, v16.8h, v17.8h\n"
-    "fadd v18.8h, v26.8h, v22.8h\n"
-    "fadd v17.8h, v25.8h, v23.8h\n"
-    "fadd v16.8h, v24.8h, v22.8h\n"
-    "fadd v19.8h, v21.8h, v19.8h\n"
-    "fadd v18.8h, v21.8h, v18.8h\n"
+    "fadd v17.8h, v6.8h, v5.8h\n"
+    "fadd v16.8h, v4.8h, v3.8h\n"
+    "fadd v19.8h, v17.8h, v16.8h\n"
+    "fadd v18.8h, v2.8h, v1.8h\n"
+    "fadd v17.8h, v0.8h, v31.8h\n"
+    "fadd v22.8h, v30.8h, v29.8h\n"
+    "fadd v16.8h, v28.8h, v27.8h\n"
+    "fadd v21.8h, v18.8h, v19.8h\n"
+    "fadd v20.8h, v16.8h, v19.8h\n"
+    "fadd v19.8h, v26.8h, v17.8h\n"
+    "fadd v18.8h, v25.8h, v22.8h\n"
+    "fadd v17.8h, v24.8h, v17.8h\n"
+    "fadd v16.8h, v23.8h, v22.8h\n"
+    "fadd v19.8h, v19.8h, v21.8h\n"
+    "fadd v18.8h, v18.8h, v21.8h\n"
     "fadd v17.8h, v17.8h, v20.8h\n"
-    "fadd v16.8h, v20.8h, v16.8h\n"
-    "fmul v19.8h, v19.8h, v8.h[0]\n"
-    "str q19, [x7, x6]\n"
-    "fmul v18.8h, v18.8h, v8.h[1]\n"
-    "fmul v17.8h, v17.8h, v8.h[2]\n"
-    "str q18, [x8, x6]\n"
-    "fmul v16.8h, v16.8h, v8.h[3]\n"
-    "str q17, [x17, x6]\n"
-    "str q16, [x16, x6]\n"
-    "add x6, x6, #0x10\n"
+    "fadd v16.8h, v16.8h, v20.8h\n"
+    "fmul v19.8h, v19.8h, v7.h[0]\n"
+    "str q19, [x6, x8]\n"
+    "fmul v18.8h, v18.8h, v7.h[1]\n"
+    "fmul v17.8h, v17.8h, v7.h[2]\n"
+    "str q18, [x7, x8]\n"
+    "fmul v16.8h, v16.8h, v7.h[3]\n"
+    "str q17, [x17, x8]\n"
+    "str q16, [x16, x8]\n"
+    "add x8, x8, #0x10\n"
     "cbz x4, 4f\n"
     "3:"  // Oddments
-    "ldr h7, [x10, x5]\n"
+    "ldr h6, [x10, x5]\n"
+    "ldr h5, [x9, x5]\n"
+    "fadd v17.8h, v6.8h, v5.8h\n"
     "subs x4, x4, #0x1\n"
-    "ldr h6, [x9, x5]\n"
-    "fadd v17.8h, v7.8h, v6.8h\n"
-    "ldr h5, [x26, x5]\n"
-    "ldr h4, [x25, x5]\n"
-    "fadd v16.8h, v5.8h, v4.8h\n"
-    "ldr h3, [x14, x5]\n"
-    "ldr h2, [x13, x5]\n"
-    "fadd v17.8h, v17.8h, v16.8h\n"
-    "ldr h1, [x11, x5]\n"
-    "ldr h0, [x27, x5]\n"
-    "fadd v18.8h, v3.8h, v2.8h\n"
-    "ldr h31, [x28, x5]\n"
-    "fadd v23.8h, v1.8h, v0.8h\n"
-    "ldr h30, [x24, x5]\n"
-    "fadd v21.8h, v18.8h, v17.8h\n"
-    "ldr h29, [x22, x5]\n"
-    "ldr h28, [x21, x5]\n"
-    "fadd v22.8h, v31.8h, v30.8h\n"
-    "ldr h27, [x15, x5]\n"
-    "ldr h26, [x12, x5]\n"
-    "fadd v16.8h, v29.8h, v28.8h\n"
-    "ldr h25, [x23, x5]\n"
-    "fadd v20.8h, v16.8h, v17.8h\n"
-    "ldr h24, [x20, x5]\n"
+    "ldr h4, [x26, x5]\n"
+    "ldr h3, [x25, x5]\n"
+    "fadd v16.8h, v4.8h, v3.8h\n"
+    "fadd v19.8h, v17.8h, v16.8h\n"
+    "ldr h2, [x14, x5]\n"
+    "ldr h1, [x13, x5]\n"
+    "fadd v18.8h, v2.8h, v1.8h\n"
+    "fadd v21.8h, v18.8h, v19.8h\n"
+    "ldr h0, [x11, x5]\n"
+    "ldr h31, [x27, x5]\n"
+    "fadd v17.8h, v0.8h, v31.8h\n"
+    "ldr h30, [x28, x5]\n"
+    "ldr h29, [x24, x5]\n"
+    "fadd v22.8h, v30.8h, v29.8h\n"
+    "ldr h28, [x22, x5]\n"
+    "ldr h27, [x21, x5]\n"
+    "fadd v16.8h, v28.8h, v27.8h\n"
+    "fadd v20.8h, v16.8h, v19.8h\n"
+    "ldr h26, [x15, x5]\n"
+    "ldr h25, [x12, x5]\n"
+    "fadd v19.8h, v26.8h, v17.8h\n"
+    "fadd v18.8h, v25.8h, v22.8h\n"
+    "ldr h24, [x23, x5]\n"
+    "ldr h23, [x20, x5]\n"
+    "fadd v17.8h, v24.8h, v17.8h\n"
+    "fadd v16.8h, v23.8h, v22.8h\n"
+    "fadd v19.8h, v19.8h, v21.8h\n"
+    "fadd v18.8h, v18.8h, v21.8h\n"
     "add x5, x5, #0x2\n"
-    "fadd v19.8h, v27.8h, v23.8h\n"
-    "fadd v18.8h, v26.8h, v22.8h\n"
-    "fadd v17.8h, v25.8h, v23.8h\n"
-    "fadd v16.8h, v24.8h, v22.8h\n"
-    "fadd v19.8h, v21.8h, v19.8h\n"
-    "fadd v18.8h, v21.8h, v18.8h\n"
     "fadd v17.8h, v17.8h, v20.8h\n"
-    "fadd v16.8h, v20.8h, v16.8h\n"
-    "fmul v19.8h, v19.8h, v8.h[0]\n"
-    "str h19, [x7, x6]\n"
-    "fmul v18.8h, v18.8h, v8.h[1]\n"
-    "fmul v17.8h, v17.8h, v8.h[2]\n"
-    "str h18, [x8, x6]\n"
-    "fmul v16.8h, v16.8h, v8.h[3]\n"
-    "str h17, [x17, x6]\n"
-    "str h16, [x16, x6]\n"
-    "add x6, x6, #0x2\n"
+    "fadd v16.8h, v16.8h, v20.8h\n"
+    "fmul v19.8h, v19.8h, v7.h[0]\n"
+    "fmul v18.8h, v18.8h, v7.h[1]\n"
+    "str h19, [x6, x8]\n"
+    "fmul v17.8h, v17.8h, v7.h[2]\n"
+    "fmul v16.8h, v16.8h, v7.h[3]\n"
+    "str h18, [x7, x8]\n"
+    "str h17, [x17, x8]\n"
+    "str h16, [x16, x8]\n"
+    "add x8, x8, #0x2\n"
     "bgt 3b\n"
     "4:"  // End
-
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif  // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp
index 9dc153a..25e7af1 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_fp16_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
 
-struct a64_fp16_nhwc_avg_generic_depthfirst
+struct a64_fp16_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = a64_fp16_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<__fp16, __fp16>;
   a64_fp16_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_fp16_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index 5bef7f2..fe6f4c2 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,7 +22,9 @@
  * SOFTWARE.
  */
 
+
 #include <cstdint>
+#include <cstddef>
 
 #if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
 
@@ -41,31 +43,31 @@
   const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
 
   __asm__ __volatile__(
-    "ld1r { v8.8h }, [%x[rescale_ptr]]\n"
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x20\n"
+    "ld1r { v7.8h }, [%x[rescale_ptr]]\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v7.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v6.16b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x0\n"
     "movi v5.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v4.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -78,47 +80,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "fadd v23.8h, v3.8h, v2.8h\n"
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd v19.8h, v1.8h, v0.8h\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "fadd v22.8h, v31.8h, v30.8h\n"
-    "ldr q3, [x23, x28]\n"
+    "fadd v22.8h, v30.8h, v22.8h\n"
     "fadd v18.8h, v29.8h, v28.8h\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "fadd v21.8h, v27.8h, v21.8h\n"
-    "ldr q2, [x22, x28]\n"
     "fadd v17.8h, v26.8h, v17.8h\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "fadd v20.8h, v25.8h, v20.8h\n"
-    "ldr q0, [x20, x28]\n"
     "fadd v16.8h, v24.8h, v16.8h\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "fadd v19.8h, v23.8h, v19.8h\n"
-    "ldr q30, [x22, x27]\n"
     "fadd v18.8h, v22.8h, v18.8h\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "fadd v17.8h, v21.8h, v17.8h\n"
-    "ldr q28, [x20, x27]\n"
     "fadd v16.8h, v20.8h, v16.8h\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "fadd v5.8h, v5.8h, v18.8h\n"
     "ldr q27, [x23, x26]\n"
-    "fadd v7.8h, v7.8h, v19.8h\n"
     "ldr q21, [x22, x26]\n"
-    "fadd v6.8h, v6.8h, v18.8h\n"
+    "fadd v4.8h, v4.8h, v17.8h\n"
+    "fadd v3.8h, v3.8h, v16.8h\n"
     "ldr q26, [x21, x26]\n"
-    "fadd v5.8h, v5.8h, v17.8h\n"
     "ldr q17, [x20, x26]\n"
-    "fadd v4.8h, v4.8h, v16.8h\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "fadd v23.8h, v3.8h, v2.8h\n"
-    "fadd v19.8h, v1.8h, v0.8h\n"
-    "fadd v22.8h, v31.8h, v30.8h\n"
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
+    "fadd v22.8h, v30.8h, v22.8h\n"
     "fadd v18.8h, v29.8h, v28.8h\n"
     "fadd v21.8h, v27.8h, v21.8h\n"
     "fadd v17.8h, v26.8h, v17.8h\n"
@@ -128,221 +130,219 @@
     "fadd v18.8h, v22.8h, v18.8h\n"
     "fadd v17.8h, v21.8h, v17.8h\n"
     "fadd v16.8h, v20.8h, v16.8h\n"
-    "fadd v7.8h, v7.8h, v19.8h\n"
-    "fadd v6.8h, v6.8h, v18.8h\n"
-    "fadd v5.8h, v5.8h, v17.8h\n"
-    "fadd v4.8h, v4.8h, v16.8h\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "fadd v5.8h, v5.8h, v18.8h\n"
+    "fadd v4.8h, v4.8h, v17.8h\n"
+    "fadd v3.8h, v3.8h, v16.8h\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fadd v7.8h, v7.8h, v3.8h\n"
-    "ldr q31, [x23, x27]\n"
+    "fadd v6.8h, v6.8h, v2.8h\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "fadd v6.8h, v6.8h, v31.8h\n"
+    "fadd v5.8h, v5.8h, v30.8h\n"
+    "fadd v4.8h, v4.8h, v27.8h\n"
     "ldr q25, [x23, x25]\n"
-    "fadd v5.8h, v5.8h, v27.8h\n"
-    "fadd v4.8h, v4.8h, v25.8h\n"
+    "fadd v3.8h, v3.8h, v25.8h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "fmul v7.8h, v7.8h, v8.8h\n"
-    "str q7, [%x[outptr], x28]\n"
-    "fmul v6.8h, v6.8h, v8.8h\n"
-    "add x28, x28, #0x40\n"
-    "fmul v5.8h, v5.8h, v8.8h\n"
-    "str q6, [%x[outptr], x27]\n"
-    "fmul v4.8h, v4.8h, v8.8h\n"
-    "add x27, x27, #0x40\n"
-    "str q5, [%x[outptr], x26]\n"
-    "add x26, x26, #0x40\n"
     "sub %x[n_channels], %x[n_channels], #0x20\n"
-    "str q4, [%x[outptr], x25]\n"
-    "add x25, x25, #0x40\n"
     "cmp %x[n_channels], #0x20\n"
+    "fmul v6.8h, v6.8h, v7.8h\n"
+    "fmul v5.8h, v5.8h, v7.8h\n"
+    "fmul v4.8h, v4.8h, v7.8h\n"
+    "fmul v3.8h, v3.8h, v7.8h\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x40\n"
+    "str q5, [%x[outptr], x27]\n"
+    "add x27, x27, #0x40\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
     "bge 1b\n"
     "cbz %x[n_channels], 31f\n"
     "7:"  // Single vector of channels
     "cmp %x[n_channels], #0x8\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v7.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "fadd v23.8h, v3.8h, v2.8h\n"
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd v19.8h, v1.8h, v0.8h\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "fadd v19.8h, v23.8h, v19.8h\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "fadd v7.8h, v7.8h, v19.8h\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "fadd v23.8h, v3.8h, v2.8h\n"
-    "fadd v19.8h, v1.8h, v0.8h\n"
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
     "fadd v19.8h, v23.8h, v19.8h\n"
-    "fadd v7.8h, v7.8h, v19.8h\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fadd v7.8h, v7.8h, v3.8h\n"
+    "fadd v6.8h, v6.8h, v2.8h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "fmul v7.8h, v7.8h, v8.8h\n"
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x10\n"
     "sub %x[n_channels], %x[n_channels], #0x8\n"
     "cmp %x[n_channels], #0x8\n"
+    "fmul v6.8h, v6.8h, v7.8h\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 31f\n"
     "14:"  // Oddments
-    "movi v7.16b, #0x0\n"
-    "add %x[outptr], %x[outptr], x28\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 20f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #2, 17f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "tbz %x[n_channels], #0, 19f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
-    "ld1 { v2.h }[6], [x22], #0x2\n"
-    "ld1 { v1.h }[6], [x21], #0x2\n"
-    "ld1 { v0.h }[6], [x20], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
     "b 19f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 19f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
-    "ld1 { v2.h }[4], [x22], #0x2\n"
-    "ld1 { v1.h }[4], [x21], #0x2\n"
-    "ld1 { v0.h }[4], [x20], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
     "b 19f\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset
     "tbz %x[n_channels], #1, 18f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "tbz %x[n_channels], #0, 19f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
-    "ld1 { v2.h }[2], [x22], #0x2\n"
-    "ld1 { v1.h }[2], [x21], #0x2\n"
-    "ld1 { v0.h }[2], [x20], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
     "b 19f\n"
     "18:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 19f\n"
-    "ldr h3, [x23], #0x2\n"
-    "ldr h2, [x22], #0x2\n"
-    "ldr h1, [x21], #0x2\n"
-    "ldr h0, [x20], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
     "19:"  // Oddments: 4 inputs loop: Load: Bit 2: End
-    "fadd v23.8h, v3.8h, v2.8h\n"
+    "fadd v23.8h, v2.8h, v1.8h\n"
+    "fadd v19.8h, v0.8h, v31.8h\n"
     "subs x24, x24, #0x1\n"
-    "fadd v19.8h, v1.8h, v0.8h\n"
     "fadd v19.8h, v23.8h, v19.8h\n"
-    "fadd v7.8h, v7.8h, v19.8h\n"
+    "fadd v6.8h, v6.8h, v19.8h\n"
     "bgt 15b\n"
     "20:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 26f\n"
     "21:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #2, 23f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #1, 22f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "tbz %x[n_channels], #0, 25f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
     "b 25f\n"
     "22:"  // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 25f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
     "b 25f\n"
     "23:"  // Oddments: Single input loop: Load: Bit 2: Unset
     "tbz %x[n_channels], #1, 24f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "tbz %x[n_channels], #0, 25f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
     "b 25f\n"
     "24:"  // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 25f\n"
-    "ldr h3, [x23], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
     "25:"  // Oddments: Single input loop: Load: Bit 2: End
-    "fadd v7.8h, v7.8h, v3.8h\n"
     "subs x20, x20, #0x1\n"
+    "fadd v6.8h, v6.8h, v2.8h\n"
     "bgt 21b\n"
     "26:"  // Oddments: Single input loop: End
-    "fmul v7.8h, v7.8h, v8.8h\n"
+    "fmul v6.8h, v6.8h, v7.8h\n"
     "tbz %x[n_channels], #2, 28f\n"
-    "st1 { v7.d }[0], [%x[outptr]], #0x8\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #1, 27f\n"
-    "st1 { v7.s }[2], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[6], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
     "b 30f\n"
     "27:"  // Oddments: Store: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[4], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
     "b 30f\n"
     "28:"  // Oddments: Store: Bit 2: Unset
     "tbz %x[n_channels], #1, 29f\n"
-    "st1 { v7.s }[0], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[2], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
     "b 30f\n"
     "29:"  // Oddments: Store: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[0], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
     "30:"  // Oddments: Store: Bit 2: End
-
     "31:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 9950bb8..23a9164 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst
+struct a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
+  using Parent = DepthfirstStrategy<__fp16, __fp16>;
 
-  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
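Note: the refactored header above leans on a DepthfirstStrategy base class that is not part of this hunk. Below is a minimal sketch of the interface implied by the usage (a six-argument constructor taking pool/stride/output sizes and a KernelType alias matching the *_impl signature); the member names and exact signatures are assumptions made for illustration, not the library's actual definition.

// Sketch only -- interface inferred from the derived struct above, not the real ComputeLibrary header.
namespace arm_conv {
namespace pooling {

template <typename TInput, typename TOutput>
class DepthfirstStrategy
{
public:
  // Matches the signature of the *_impl kernels declared in these headers.
  using KernelType = void (*)(unsigned int, const TInput *const *, TOutput *const *,
                              bool, unsigned int, unsigned int, unsigned int, unsigned int);

  DepthfirstStrategy(unsigned int pool_rows, unsigned int pool_cols,
                     unsigned int stride_rows, unsigned int stride_cols,
                     unsigned int out_rows, unsigned int out_cols)
  : m_pool_rows(pool_rows), m_pool_cols(pool_cols),
    m_stride_rows(stride_rows), m_stride_cols(stride_cols),
    m_out_rows(out_rows), m_out_cols(out_cols)
  {
  }

  virtual ~DepthfirstStrategy() = default;
  virtual KernelType get_kernel(void) const = 0;

private:
  unsigned int m_pool_rows, m_pool_cols, m_stride_rows, m_stride_cols;
  unsigned int m_out_rows, m_out_cols;
};

}  // namespace pooling
}  // namespace arm_conv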
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 1c461ee..b12d090 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,11 +22,10 @@
  * SOFTWARE.
  */
 
-
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
 
 namespace arm_conv {
 namespace pooling {
@@ -64,12 +63,12 @@
 
   __asm__ __volatile__(
     "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x14, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x13, #0x0\n"
-    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
     "cmp x15, #0x8\n"
-    "ldp x12, x11, [x20, #0x0]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x13, x12, [x20, #0x0]\n"
+    "mov x11, #0x0\n"
     "ldp x10, x9, [x20, #0x10]\n"
     "ldp x28, x27, [x19, #0x0]\n"
     "ldp x26, x25, [x19, #0x10]\n"
@@ -77,12 +76,12 @@
     "ldp x22, x21, [x19, #0x30]\n"
     "ldr x20, [x19, #0x40]\n"
     "blt 3f\n"
-    "ldr q30, [x27, x14]\n"
     "lsr x19, x15, #0x3\n"
-    "ldr q29, [x24, x14]\n"
     "sub x15, x15, x19, LSL #3\n"
-    "ldr q28, [x21, x14]\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
     "subs x19, x19, #0x1\n"
+    "ldr q28, [x21, x14]\n"
     "ldr q27, [x25, x14]\n"
     "ldr q26, [x28, x14]\n"
     "ldr q25, [x23, x14]\n"
@@ -93,31 +92,31 @@
     "beq 2f\n"
     "1:"  // Vector: Loop
     "fmax v21.8h, v30.8h, v29.8h\n"
-    "ldr q30, [x27, x14]\n"
-    "subs x19, x19, #0x1\n"
     "fmax v20.8h, v29.8h, v28.8h\n"
-    "ldr q29, [x24, x14]\n"
+    "subs x19, x19, #0x1\n"
+    "ldr q30, [x27, x14]\n"
     "fmax v19.8h, v27.8h, v26.8h\n"
-    "ldr q28, [x21, x14]\n"
     "fmax v18.8h, v25.8h, v24.8h\n"
-    "ldr q26, [x28, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
     "fmax v17.8h, v23.8h, v27.8h\n"
-    "ldr q27, [x25, x14]\n"
     "fmax v16.8h, v25.8h, v22.8h\n"
-    "ldr q25, [x23, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
     "fmax v19.8h, v21.8h, v19.8h\n"
+    "fmax v18.8h, v18.8h, v21.8h\n"
+    "ldr q25, [x23, x14]\n"
     "ldr q24, [x26, x14]\n"
-    "fmax v18.8h, v21.8h, v18.8h\n"
-    "ldr q23, [x22, x14]\n"
     "fmax v17.8h, v20.8h, v17.8h\n"
+    "fmax v16.8h, v20.8h, v16.8h\n"
+    "ldr q23, [x22, x14]\n"
     "ldr q22, [x20, x14]\n"
     "add x14, x14, #0x10\n"
-    "fmax v16.8h, v20.8h, v16.8h\n"
-    "str q19, [x12, x13]\n"
-    "str q18, [x11, x13]\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q19, [x13, x11]\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "bgt 1b\n"
     "2:"  // Vector: Tail
     "fmax v21.8h, v30.8h, v29.8h\n"
@@ -127,45 +126,44 @@
     "fmax v17.8h, v23.8h, v27.8h\n"
     "fmax v16.8h, v25.8h, v22.8h\n"
     "fmax v19.8h, v21.8h, v19.8h\n"
-    "str q19, [x12, x13]\n"
-    "fmax v18.8h, v21.8h, v18.8h\n"
+    "fmax v18.8h, v18.8h, v21.8h\n"
+    "str q19, [x13, x11]\n"
     "fmax v17.8h, v20.8h, v17.8h\n"
-    "str q18, [x11, x13]\n"
     "fmax v16.8h, v20.8h, v16.8h\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "cbz x15, 4f\n"
     "3:"  // Oddments
     "ldr h30, [x27, x14]\n"
-    "subs x15, x15, #0x1\n"
     "ldr h29, [x24, x14]\n"
     "fmax v21.8h, v30.8h, v29.8h\n"
+    "subs x15, x15, #0x1\n"
     "ldr h28, [x21, x14]\n"
     "ldr h27, [x25, x14]\n"
     "fmax v20.8h, v29.8h, v28.8h\n"
     "ldr h26, [x28, x14]\n"
     "ldr h25, [x23, x14]\n"
     "fmax v19.8h, v27.8h, v26.8h\n"
+    "fmax v19.8h, v21.8h, v19.8h\n"
     "ldr h24, [x26, x14]\n"
     "ldr h23, [x22, x14]\n"
-    "fmax v19.8h, v21.8h, v19.8h\n"
-    "ldr h22, [x20, x14]\n"
-    "add x14, x14, #0x2\n"
     "fmax v18.8h, v25.8h, v24.8h\n"
-    "str h19, [x12, x13]\n"
     "fmax v17.8h, v23.8h, v27.8h\n"
+    "ldr h22, [x20, x14]\n"
     "fmax v16.8h, v25.8h, v22.8h\n"
-    "fmax v18.8h, v21.8h, v18.8h\n"
-    "str h18, [x11, x13]\n"
+    "add x14, x14, #0x2\n"
+    "fmax v18.8h, v18.8h, v21.8h\n"
     "fmax v17.8h, v20.8h, v17.8h\n"
     "fmax v16.8h, v20.8h, v16.8h\n"
-    "str h17, [x10, x13]\n"
-    "str h16, [x9, x13]\n"
-    "add x13, x13, #0x2\n"
+    "str h19, [x13, x11]\n"
+    "str h18, [x12, x11]\n"
+    "str h17, [x10, x11]\n"
+    "str h16, [x9, x11]\n"
+    "add x11, x11, #0x2\n"
     "bgt 3b\n"
     "4:"  // End
-
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
@@ -175,4 +173,4 @@
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif  // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp
index 8bea0bf..4998b37 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_fp16_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
 
-struct a64_fp16_nhwc_max_generic_depthfirst
+struct a64_fp16_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = a64_fp16_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<__fp16, __fp16>;
   a64_fp16_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_fp16_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
index e5f7ee3..f1eec31 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
 
@@ -39,31 +40,31 @@
 )
 {
   __asm__ __volatile__(
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x20\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "mov w20, #0xfc00\n"
-    "dup v7.8h, w20\n"
-    "mov x19, %x[inptrs]\n"
-    "dup v6.8h, w20\n"
+    "mov w19, #0xfc00\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
-    "dup v5.8h, w20\n"
-    "dup v4.8h, w20\n"
+    "dup v6.8h, w19\n"
+    "dup v5.8h, w19\n"
+    "dup v4.8h, w19\n"
+    "dup v3.8h, w19\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -76,47 +77,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "fmax v23.8h, v3.8h, v2.8h\n"
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fmax v19.8h, v1.8h, v0.8h\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "fmax v22.8h, v31.8h, v30.8h\n"
-    "ldr q3, [x23, x28]\n"
+    "fmax v22.8h, v30.8h, v22.8h\n"
     "fmax v18.8h, v29.8h, v28.8h\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "fmax v21.8h, v27.8h, v21.8h\n"
-    "ldr q2, [x22, x28]\n"
     "fmax v17.8h, v26.8h, v17.8h\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "fmax v20.8h, v25.8h, v20.8h\n"
-    "ldr q0, [x20, x28]\n"
     "fmax v16.8h, v24.8h, v16.8h\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "fmax v19.8h, v23.8h, v19.8h\n"
-    "ldr q30, [x22, x27]\n"
     "fmax v18.8h, v22.8h, v18.8h\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "fmax v17.8h, v21.8h, v17.8h\n"
-    "ldr q28, [x20, x27]\n"
     "fmax v16.8h, v20.8h, v16.8h\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "fmax v5.8h, v5.8h, v18.8h\n"
     "ldr q27, [x23, x26]\n"
-    "fmax v7.8h, v7.8h, v19.8h\n"
     "ldr q21, [x22, x26]\n"
-    "fmax v6.8h, v6.8h, v18.8h\n"
+    "fmax v4.8h, v4.8h, v17.8h\n"
+    "fmax v3.8h, v3.8h, v16.8h\n"
     "ldr q26, [x21, x26]\n"
-    "fmax v5.8h, v5.8h, v17.8h\n"
     "ldr q17, [x20, x26]\n"
-    "fmax v4.8h, v4.8h, v16.8h\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "fmax v23.8h, v3.8h, v2.8h\n"
-    "fmax v19.8h, v1.8h, v0.8h\n"
-    "fmax v22.8h, v31.8h, v30.8h\n"
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
+    "fmax v22.8h, v30.8h, v22.8h\n"
     "fmax v18.8h, v29.8h, v28.8h\n"
     "fmax v21.8h, v27.8h, v21.8h\n"
     "fmax v17.8h, v26.8h, v17.8h\n"
@@ -126,36 +127,36 @@
     "fmax v18.8h, v22.8h, v18.8h\n"
     "fmax v17.8h, v21.8h, v17.8h\n"
     "fmax v16.8h, v20.8h, v16.8h\n"
-    "fmax v7.8h, v7.8h, v19.8h\n"
-    "fmax v6.8h, v6.8h, v18.8h\n"
-    "fmax v5.8h, v5.8h, v17.8h\n"
-    "fmax v4.8h, v4.8h, v16.8h\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "fmax v5.8h, v5.8h, v18.8h\n"
+    "fmax v4.8h, v4.8h, v17.8h\n"
+    "fmax v3.8h, v3.8h, v16.8h\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fmax v7.8h, v7.8h, v3.8h\n"
-    "ldr q31, [x23, x27]\n"
+    "fmax v6.8h, v6.8h, v2.8h\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "fmax v6.8h, v6.8h, v31.8h\n"
+    "fmax v5.8h, v5.8h, v30.8h\n"
+    "fmax v4.8h, v4.8h, v27.8h\n"
     "ldr q25, [x23, x25]\n"
-    "fmax v5.8h, v5.8h, v27.8h\n"
-    "fmax v4.8h, v4.8h, v25.8h\n"
+    "fmax v3.8h, v3.8h, v25.8h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x40\n"
-    "str q6, [%x[outptr], x27]\n"
-    "add x27, x27, #0x40\n"
-    "str q5, [%x[outptr], x26]\n"
-    "add x26, x26, #0x40\n"
-    "str q4, [%x[outptr], x25]\n"
-    "add x25, x25, #0x40\n"
     "sub %x[n_channels], %x[n_channels], #0x20\n"
     "cmp %x[n_channels], #0x20\n"
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
     "bge 1b\n"
     "cbz %x[n_channels], 31f\n"
     "7:"  // Single vector of channels
@@ -163,180 +164,178 @@
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
     "mov w19, #0xfc00\n"
-    "dup v7.8h, w19\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "dup v6.8h, w19\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "fmax v23.8h, v3.8h, v2.8h\n"
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fmax v19.8h, v1.8h, v0.8h\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "fmax v19.8h, v23.8h, v19.8h\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "fmax v7.8h, v7.8h, v19.8h\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "fmax v23.8h, v3.8h, v2.8h\n"
-    "fmax v19.8h, v1.8h, v0.8h\n"
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
     "fmax v19.8h, v23.8h, v19.8h\n"
-    "fmax v7.8h, v7.8h, v19.8h\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fmax v7.8h, v7.8h, v3.8h\n"
+    "fmax v6.8h, v6.8h, v2.8h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x10\n"
     "sub %x[n_channels], %x[n_channels], #0x8\n"
     "cmp %x[n_channels], #0x8\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 31f\n"
     "14:"  // Oddments
-    "add %x[outptr], %x[outptr], x28\n"
     "mov w19, #0xfc00\n"
-    "dup v7.8h, w19\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "dup v6.8h, w19\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 20f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #2, 17f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "tbz %x[n_channels], #0, 19f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
-    "ld1 { v2.h }[6], [x22], #0x2\n"
-    "ld1 { v1.h }[6], [x21], #0x2\n"
-    "ld1 { v0.h }[6], [x20], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
     "b 19f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 19f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
-    "ld1 { v2.h }[4], [x22], #0x2\n"
-    "ld1 { v1.h }[4], [x21], #0x2\n"
-    "ld1 { v0.h }[4], [x20], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
     "b 19f\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset
     "tbz %x[n_channels], #1, 18f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "tbz %x[n_channels], #0, 19f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
-    "ld1 { v2.h }[2], [x22], #0x2\n"
-    "ld1 { v1.h }[2], [x21], #0x2\n"
-    "ld1 { v0.h }[2], [x20], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
     "b 19f\n"
     "18:"  // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 19f\n"
-    "ldr h3, [x23], #0x2\n"
-    "ldr h2, [x22], #0x2\n"
-    "ldr h1, [x21], #0x2\n"
-    "ldr h0, [x20], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
     "19:"  // Oddments: 4 inputs loop: Load: Bit 2: End
-    "fmax v23.8h, v3.8h, v2.8h\n"
+    "fmax v23.8h, v2.8h, v1.8h\n"
+    "fmax v19.8h, v0.8h, v31.8h\n"
     "subs x24, x24, #0x1\n"
-    "fmax v19.8h, v1.8h, v0.8h\n"
     "fmax v19.8h, v23.8h, v19.8h\n"
-    "fmax v7.8h, v7.8h, v19.8h\n"
+    "fmax v6.8h, v6.8h, v19.8h\n"
     "bgt 15b\n"
     "20:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 26f\n"
     "21:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #2, 23f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #1, 22f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "tbz %x[n_channels], #0, 25f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
     "b 25f\n"
     "22:"  // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 25f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
     "b 25f\n"
     "23:"  // Oddments: Single input loop: Load: Bit 2: Unset
     "tbz %x[n_channels], #1, 24f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "tbz %x[n_channels], #0, 25f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
     "b 25f\n"
     "24:"  // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 25f\n"
-    "ldr h3, [x23], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
     "25:"  // Oddments: Single input loop: Load: Bit 2: End
-    "fmax v7.8h, v7.8h, v3.8h\n"
     "subs x20, x20, #0x1\n"
+    "fmax v6.8h, v6.8h, v2.8h\n"
     "bgt 21b\n"
     "26:"  // Oddments: Single input loop: End
     "tbz %x[n_channels], #2, 28f\n"
-    "st1 { v7.d }[0], [%x[outptr]], #0x8\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #1, 27f\n"
-    "st1 { v7.s }[2], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[6], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
     "b 30f\n"
     "27:"  // Oddments: Store: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[4], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
     "b 30f\n"
     "28:"  // Oddments: Store: Bit 2: Unset
     "tbz %x[n_channels], #1, 29f\n"
-    "st1 { v7.s }[0], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[2], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
     "b 30f\n"
     "29:"  // Oddments: Store: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 30f\n"
-    "st1 { v7.h }[0], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
     "30:"  // Oddments: Store: Bit 2: End
-
     "31:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
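For reference, a scalar sketch (written for this note, not code from the patch) of what the generic max-pooling kernel above computes per channel: a running maximum over all valid input cells, seeded with negative infinity just as the assembly seeds its accumulators with the fp16 -inf pattern 0xfc00. The real kernel's leading window_cells argument is irrelevant to max pooling and is omitted; float is used instead of __fp16 for portability.

#include <algorithm>
#include <cstdint>
#include <limits>

// Scalar illustration only; not the vectorised kernel.
void max_pool_generic_reference(uint64_t n_valid_cells, uint64_t n_channels,
                                const float *const *inptrs, float *outptr)
{
  for (uint64_t c = 0; c < n_channels; c++)
  {
    float acc = -std::numeric_limits<float>::infinity();  // mirrors the 0xfc00 splat
    for (uint64_t i = 0; i < n_valid_cells; i++)
    {
      acc = std::max(acc, inptrs[i][c]);
    }
    outptr[c] = acc;
  }
}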
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
index 9a16b99..813e685 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,27 +29,18 @@
 
 void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst
+struct a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst : public DepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
+  using Parent = DepthfirstStrategy<float, float>;
 
-  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::AVERAGE;
+  const static auto pool_rows = 3u, pool_cols = 3u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+  a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 3; }
-  constexpr static unsigned int pool_cols(void) { return 3; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
-
-  a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 71a8c74..fc0efc7 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #if defined(__aarch64__)
+
 #include <algorithm>
 #include <cstddef>
 #include <cstdint>
@@ -82,12 +83,12 @@
 
   __asm__ __volatile__(
     "ldr x4, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x5, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x6, #0x0\n"
-    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
     "cmp x4, #0x4\n"
-    "ldp x7, x8, [x20, #0x0]\n"
+    "mov x5, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x6, x7, [x20, #0x0]\n"
+    "mov x8, #0x0\n"
     "ldp x17, x16, [x20, #0x10]\n"
     "ldp x15, x14, [x19, #0x0]\n"
     "ldp x13, x12, [x19, #0x10]\n"
@@ -97,157 +98,156 @@
     "ldp x25, x24, [x19, #0x50]\n"
     "ldp x23, x22, [x19, #0x60]\n"
     "ldp x21, x20, [x19, #0x70]\n"
-    "ldr q8, [%x[args], %[offsetof_rescale]]\n"
+    "ldr q7, [%x[args], %[offsetof_rescale]]\n"
     "blt 3f\n"
-    "ldr q7, [x10, x5]\n"
     "lsr x19, x4, #0x2\n"
-    "ldr q6, [x9, x5]\n"
     "sub x4, x4, x19, LSL #2\n"
-    "ldr q5, [x26, x5]\n"
+    "ldr q6, [x10, x5]\n"
+    "ldr q5, [x9, x5]\n"
     "subs x19, x19, #0x1\n"
-    "ldr q4, [x25, x5]\n"
-    "ldr q3, [x14, x5]\n"
-    "ldr q2, [x13, x5]\n"
-    "ldr q1, [x11, x5]\n"
-    "ldr q0, [x27, x5]\n"
-    "ldr q31, [x28, x5]\n"
-    "ldr q30, [x24, x5]\n"
-    "ldr q29, [x22, x5]\n"
-    "ldr q28, [x21, x5]\n"
-    "ldr q27, [x15, x5]\n"
-    "ldr q26, [x12, x5]\n"
-    "ldr q25, [x23, x5]\n"
-    "ldr q24, [x20, x5]\n"
+    "ldr q4, [x26, x5]\n"
+    "ldr q3, [x25, x5]\n"
+    "ldr q2, [x14, x5]\n"
+    "ldr q1, [x13, x5]\n"
+    "ldr q0, [x11, x5]\n"
+    "ldr q31, [x27, x5]\n"
+    "ldr q30, [x28, x5]\n"
+    "ldr q29, [x24, x5]\n"
+    "ldr q28, [x22, x5]\n"
+    "ldr q27, [x21, x5]\n"
+    "ldr q26, [x15, x5]\n"
+    "ldr q25, [x12, x5]\n"
+    "ldr q24, [x23, x5]\n"
+    "ldr q23, [x20, x5]\n"
     "add x5, x5, #0x10\n"
     "beq 2f\n"
     "1:"  // Vector: Loop
-    "fadd v17.4s, v7.4s, v6.4s\n"
-    "ldr q7, [x10, x5]\n"
+    "fadd v17.4s, v6.4s, v5.4s\n"
+    "fadd v16.4s, v4.4s, v3.4s\n"
     "subs x19, x19, #0x1\n"
-    "fadd v16.4s, v5.4s, v4.4s\n"
-    "ldr q6, [x9, x5]\n"
-    "fadd v18.4s, v3.4s, v2.4s\n"
-    "ldr q5, [x26, x5]\n"
-    "fadd v23.4s, v1.4s, v0.4s\n"
-    "ldr q4, [x25, x5]\n"
-    "fadd v22.4s, v31.4s, v30.4s\n"
-    "ldr q3, [x14, x5]\n"
-    "fadd v17.4s, v17.4s, v16.4s\n"
-    "ldr q2, [x13, x5]\n"
-    "fadd v16.4s, v29.4s, v28.4s\n"
-    "ldr q1, [x11, x5]\n"
-    "fadd v19.4s, v27.4s, v23.4s\n"
-    "ldr q0, [x27, x5]\n"
-    "fadd v21.4s, v18.4s, v17.4s\n"
-    "ldr q31, [x28, x5]\n"
-    "fadd v20.4s, v16.4s, v17.4s\n"
-    "ldr q30, [x24, x5]\n"
-    "fadd v18.4s, v26.4s, v22.4s\n"
-    "ldr q29, [x22, x5]\n"
-    "fadd v17.4s, v25.4s, v23.4s\n"
-    "ldr q28, [x21, x5]\n"
-    "fadd v16.4s, v24.4s, v22.4s\n"
-    "ldr q27, [x15, x5]\n"
-    "fadd v19.4s, v21.4s, v19.4s\n"
-    "ldr q26, [x12, x5]\n"
-    "fadd v18.4s, v21.4s, v18.4s\n"
-    "ldr q25, [x23, x5]\n"
+    "ldr q6, [x10, x5]\n"
+    "fadd v19.4s, v17.4s, v16.4s\n"
+    "fadd v18.4s, v2.4s, v1.4s\n"
+    "ldr q5, [x9, x5]\n"
+    "ldr q4, [x26, x5]\n"
+    "fadd v17.4s, v0.4s, v31.4s\n"
+    "fadd v22.4s, v30.4s, v29.4s\n"
+    "ldr q3, [x25, x5]\n"
+    "ldr q2, [x14, x5]\n"
+    "fadd v16.4s, v28.4s, v27.4s\n"
+    "fadd v21.4s, v18.4s, v19.4s\n"
+    "ldr q1, [x13, x5]\n"
+    "ldr q0, [x11, x5]\n"
+    "fadd v20.4s, v16.4s, v19.4s\n"
+    "fadd v19.4s, v26.4s, v17.4s\n"
+    "ldr q31, [x27, x5]\n"
+    "ldr q30, [x28, x5]\n"
+    "fadd v18.4s, v25.4s, v22.4s\n"
+    "fadd v17.4s, v24.4s, v17.4s\n"
+    "ldr q29, [x24, x5]\n"
+    "ldr q28, [x22, x5]\n"
+    "fadd v16.4s, v23.4s, v22.4s\n"
+    "fadd v19.4s, v19.4s, v21.4s\n"
+    "ldr q27, [x21, x5]\n"
+    "ldr q26, [x15, x5]\n"
+    "fadd v18.4s, v18.4s, v21.4s\n"
     "fadd v17.4s, v17.4s, v20.4s\n"
-    "ldr q24, [x20, x5]\n"
+    "ldr q25, [x12, x5]\n"
+    "ldr q24, [x23, x5]\n"
+    "fadd v16.4s, v16.4s, v20.4s\n"
+    "fmul v19.4s, v19.4s, v7.s[0]\n"
+    "ldr q23, [x20, x5]\n"
     "add x5, x5, #0x10\n"
-    "fadd v16.4s, v20.4s, v16.4s\n"
-    "fmul v19.4s, v19.4s, v8.s[0]\n"
-    "str q19, [x7, x6]\n"
-    "fmul v18.4s, v18.4s, v8.s[1]\n"
-    "fmul v17.4s, v17.4s, v8.s[2]\n"
-    "str q18, [x8, x6]\n"
-    "fmul v16.4s, v16.4s, v8.s[3]\n"
-    "str q17, [x17, x6]\n"
-    "str q16, [x16, x6]\n"
-    "add x6, x6, #0x10\n"
+    "fmul v18.4s, v18.4s, v7.s[1]\n"
+    "fmul v17.4s, v17.4s, v7.s[2]\n"
+    "str q19, [x6, x8]\n"
+    "fmul v16.4s, v16.4s, v7.s[3]\n"
+    "str q18, [x7, x8]\n"
+    "str q17, [x17, x8]\n"
+    "str q16, [x16, x8]\n"
+    "add x8, x8, #0x10\n"
     "bgt 1b\n"
     "2:"  // Vector: Tail
-    "fadd v17.4s, v7.4s, v6.4s\n"
-    "fadd v16.4s, v5.4s, v4.4s\n"
-    "fadd v18.4s, v3.4s, v2.4s\n"
-    "fadd v23.4s, v1.4s, v0.4s\n"
-    "fadd v17.4s, v17.4s, v16.4s\n"
-    "fadd v22.4s, v31.4s, v30.4s\n"
-    "fadd v16.4s, v29.4s, v28.4s\n"
-    "fadd v21.4s, v18.4s, v17.4s\n"
-    "fadd v19.4s, v27.4s, v23.4s\n"
-    "fadd v20.4s, v16.4s, v17.4s\n"
-    "fadd v18.4s, v26.4s, v22.4s\n"
-    "fadd v17.4s, v25.4s, v23.4s\n"
-    "fadd v16.4s, v24.4s, v22.4s\n"
-    "fadd v19.4s, v21.4s, v19.4s\n"
-    "fadd v18.4s, v21.4s, v18.4s\n"
+    "fadd v17.4s, v6.4s, v5.4s\n"
+    "fadd v16.4s, v4.4s, v3.4s\n"
+    "fadd v19.4s, v17.4s, v16.4s\n"
+    "fadd v18.4s, v2.4s, v1.4s\n"
+    "fadd v17.4s, v0.4s, v31.4s\n"
+    "fadd v22.4s, v30.4s, v29.4s\n"
+    "fadd v16.4s, v28.4s, v27.4s\n"
+    "fadd v21.4s, v18.4s, v19.4s\n"
+    "fadd v20.4s, v16.4s, v19.4s\n"
+    "fadd v19.4s, v26.4s, v17.4s\n"
+    "fadd v18.4s, v25.4s, v22.4s\n"
+    "fadd v17.4s, v24.4s, v17.4s\n"
+    "fadd v16.4s, v23.4s, v22.4s\n"
+    "fadd v19.4s, v19.4s, v21.4s\n"
+    "fadd v18.4s, v18.4s, v21.4s\n"
     "fadd v17.4s, v17.4s, v20.4s\n"
-    "fadd v16.4s, v20.4s, v16.4s\n"
-    "fmul v19.4s, v19.4s, v8.s[0]\n"
-    "str q19, [x7, x6]\n"
-    "fmul v18.4s, v18.4s, v8.s[1]\n"
-    "fmul v17.4s, v17.4s, v8.s[2]\n"
-    "str q18, [x8, x6]\n"
-    "fmul v16.4s, v16.4s, v8.s[3]\n"
-    "str q17, [x17, x6]\n"
-    "str q16, [x16, x6]\n"
-    "add x6, x6, #0x10\n"
+    "fadd v16.4s, v16.4s, v20.4s\n"
+    "fmul v19.4s, v19.4s, v7.s[0]\n"
+    "str q19, [x6, x8]\n"
+    "fmul v18.4s, v18.4s, v7.s[1]\n"
+    "fmul v17.4s, v17.4s, v7.s[2]\n"
+    "str q18, [x7, x8]\n"
+    "fmul v16.4s, v16.4s, v7.s[3]\n"
+    "str q17, [x17, x8]\n"
+    "str q16, [x16, x8]\n"
+    "add x8, x8, #0x10\n"
     "cbz x4, 4f\n"
     "3:"  // Oddments
-    "ldr s7, [x10, x5]\n"
+    "ldr s6, [x10, x5]\n"
+    "ldr s5, [x9, x5]\n"
+    "fadd v17.4s, v6.4s, v5.4s\n"
     "subs x4, x4, #0x1\n"
-    "ldr s6, [x9, x5]\n"
-    "fadd v17.4s, v7.4s, v6.4s\n"
-    "ldr s5, [x26, x5]\n"
-    "ldr s4, [x25, x5]\n"
-    "fadd v16.4s, v5.4s, v4.4s\n"
-    "ldr s3, [x14, x5]\n"
-    "ldr s2, [x13, x5]\n"
-    "fadd v17.4s, v17.4s, v16.4s\n"
-    "ldr s1, [x11, x5]\n"
-    "ldr s0, [x27, x5]\n"
-    "fadd v18.4s, v3.4s, v2.4s\n"
-    "ldr s31, [x28, x5]\n"
-    "fadd v23.4s, v1.4s, v0.4s\n"
-    "ldr s30, [x24, x5]\n"
-    "fadd v21.4s, v18.4s, v17.4s\n"
-    "ldr s29, [x22, x5]\n"
-    "ldr s28, [x21, x5]\n"
-    "fadd v22.4s, v31.4s, v30.4s\n"
-    "ldr s27, [x15, x5]\n"
-    "ldr s26, [x12, x5]\n"
-    "fadd v16.4s, v29.4s, v28.4s\n"
-    "ldr s25, [x23, x5]\n"
-    "fadd v20.4s, v16.4s, v17.4s\n"
-    "ldr s24, [x20, x5]\n"
+    "ldr s4, [x26, x5]\n"
+    "ldr s3, [x25, x5]\n"
+    "fadd v16.4s, v4.4s, v3.4s\n"
+    "fadd v19.4s, v17.4s, v16.4s\n"
+    "ldr s2, [x14, x5]\n"
+    "ldr s1, [x13, x5]\n"
+    "fadd v18.4s, v2.4s, v1.4s\n"
+    "fadd v21.4s, v18.4s, v19.4s\n"
+    "ldr s0, [x11, x5]\n"
+    "ldr s31, [x27, x5]\n"
+    "fadd v17.4s, v0.4s, v31.4s\n"
+    "ldr s30, [x28, x5]\n"
+    "ldr s29, [x24, x5]\n"
+    "fadd v22.4s, v30.4s, v29.4s\n"
+    "ldr s28, [x22, x5]\n"
+    "ldr s27, [x21, x5]\n"
+    "fadd v16.4s, v28.4s, v27.4s\n"
+    "fadd v20.4s, v16.4s, v19.4s\n"
+    "ldr s26, [x15, x5]\n"
+    "ldr s25, [x12, x5]\n"
+    "fadd v19.4s, v26.4s, v17.4s\n"
+    "fadd v18.4s, v25.4s, v22.4s\n"
+    "ldr s24, [x23, x5]\n"
+    "ldr s23, [x20, x5]\n"
+    "fadd v17.4s, v24.4s, v17.4s\n"
+    "fadd v16.4s, v23.4s, v22.4s\n"
+    "fadd v19.4s, v19.4s, v21.4s\n"
+    "fadd v18.4s, v18.4s, v21.4s\n"
     "add x5, x5, #0x4\n"
-    "fadd v19.4s, v27.4s, v23.4s\n"
-    "fadd v18.4s, v26.4s, v22.4s\n"
-    "fadd v17.4s, v25.4s, v23.4s\n"
-    "fadd v16.4s, v24.4s, v22.4s\n"
-    "fadd v19.4s, v21.4s, v19.4s\n"
-    "fadd v18.4s, v21.4s, v18.4s\n"
     "fadd v17.4s, v17.4s, v20.4s\n"
-    "fadd v16.4s, v20.4s, v16.4s\n"
-    "fmul v19.4s, v19.4s, v8.s[0]\n"
-    "str s19, [x7, x6]\n"
-    "fmul v18.4s, v18.4s, v8.s[1]\n"
-    "fmul v17.4s, v17.4s, v8.s[2]\n"
-    "str s18, [x8, x6]\n"
-    "fmul v16.4s, v16.4s, v8.s[3]\n"
-    "str s17, [x17, x6]\n"
-    "str s16, [x16, x6]\n"
-    "add x6, x6, #0x4\n"
+    "fadd v16.4s, v16.4s, v20.4s\n"
+    "fmul v19.4s, v19.4s, v7.s[0]\n"
+    "fmul v18.4s, v18.4s, v7.s[1]\n"
+    "str s19, [x6, x8]\n"
+    "fmul v17.4s, v17.4s, v7.s[2]\n"
+    "fmul v16.4s, v16.4s, v7.s[3]\n"
+    "str s18, [x7, x8]\n"
+    "str s17, [x17, x8]\n"
+    "str s16, [x16, x8]\n"
+    "add x8, x8, #0x4\n"
     "bgt 3b\n"
     "4:"  // End
-
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-#endif // defined(__aarch64__)
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp
index 4ef2631..26895e6 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_fp32_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
 
-struct a64_fp32_nhwc_avg_generic_depthfirst
+struct a64_fp32_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = a64_fp32_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<float, float>;
   a64_fp32_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_fp32_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
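As with the max kernel, a scalar sketch (illustration only, not part of the patch) of what the average-pooling generic kernel declared above computes: the per-channel sum over all valid cells scaled by 1/window_cells, matching the rescale_value that the assembly in the following file broadcasts into a vector register.

#include <cstdint>

// Scalar illustration of the contract of a64_fp32_nhwc_avg_generic_depthfirst_impl, as read from the kernel.
void avg_pool_generic_reference(uint64_t window_cells, uint64_t n_valid_cells,
                                uint64_t n_channels, const float *const *inptrs,
                                float *outptr)
{
  const float rescale = 1.0f / static_cast<float>(window_cells);
  for (uint64_t c = 0; c < n_channels; c++)
  {
    float acc = 0.0f;
    for (uint64_t i = 0; i < n_valid_cells; i++)
    {
      acc += inptrs[i][c];
    }
    outptr[c] = acc * rescale;
  }
}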
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index 21f7054..2d20164 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,10 +22,11 @@
  * SOFTWARE.
  */
 
-#include <cstdint>
-
 #if defined(__aarch64__)
 
+#include <cstdint>
+#include <cstddef>
+
 namespace arm_conv {
 namespace pooling {
 
@@ -41,31 +42,31 @@
   const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
 
   __asm__ __volatile__(
-    "ld1r { v8.4s }, [%x[rescale_ptr]]\n"
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x10\n"
+    "ld1r { v7.4s }, [%x[rescale_ptr]]\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v7.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v6.16b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x0\n"
     "movi v5.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v4.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -78,47 +79,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "fadd v23.4s, v3.4s, v2.4s\n"
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd v19.4s, v1.4s, v0.4s\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "fadd v22.4s, v31.4s, v30.4s\n"
-    "ldr q3, [x23, x28]\n"
+    "fadd v22.4s, v30.4s, v22.4s\n"
     "fadd v18.4s, v29.4s, v28.4s\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "fadd v21.4s, v27.4s, v21.4s\n"
-    "ldr q2, [x22, x28]\n"
     "fadd v17.4s, v26.4s, v17.4s\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "fadd v20.4s, v25.4s, v20.4s\n"
-    "ldr q0, [x20, x28]\n"
     "fadd v16.4s, v24.4s, v16.4s\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "fadd v19.4s, v23.4s, v19.4s\n"
-    "ldr q30, [x22, x27]\n"
     "fadd v18.4s, v22.4s, v18.4s\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "fadd v17.4s, v21.4s, v17.4s\n"
-    "ldr q28, [x20, x27]\n"
     "fadd v16.4s, v20.4s, v16.4s\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "fadd v5.4s, v5.4s, v18.4s\n"
     "ldr q27, [x23, x26]\n"
-    "fadd v7.4s, v7.4s, v19.4s\n"
     "ldr q21, [x22, x26]\n"
-    "fadd v6.4s, v6.4s, v18.4s\n"
+    "fadd v4.4s, v4.4s, v17.4s\n"
+    "fadd v3.4s, v3.4s, v16.4s\n"
     "ldr q26, [x21, x26]\n"
-    "fadd v5.4s, v5.4s, v17.4s\n"
     "ldr q17, [x20, x26]\n"
-    "fadd v4.4s, v4.4s, v16.4s\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "fadd v23.4s, v3.4s, v2.4s\n"
-    "fadd v19.4s, v1.4s, v0.4s\n"
-    "fadd v22.4s, v31.4s, v30.4s\n"
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
+    "fadd v22.4s, v30.4s, v22.4s\n"
     "fadd v18.4s, v29.4s, v28.4s\n"
     "fadd v21.4s, v27.4s, v21.4s\n"
     "fadd v17.4s, v26.4s, v17.4s\n"
@@ -128,177 +129,174 @@
     "fadd v18.4s, v22.4s, v18.4s\n"
     "fadd v17.4s, v21.4s, v17.4s\n"
     "fadd v16.4s, v20.4s, v16.4s\n"
-    "fadd v7.4s, v7.4s, v19.4s\n"
-    "fadd v6.4s, v6.4s, v18.4s\n"
-    "fadd v5.4s, v5.4s, v17.4s\n"
-    "fadd v4.4s, v4.4s, v16.4s\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "fadd v5.4s, v5.4s, v18.4s\n"
+    "fadd v4.4s, v4.4s, v17.4s\n"
+    "fadd v3.4s, v3.4s, v16.4s\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fadd v7.4s, v7.4s, v3.4s\n"
-    "ldr q31, [x23, x27]\n"
+    "fadd v6.4s, v6.4s, v2.4s\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "fadd v6.4s, v6.4s, v31.4s\n"
+    "fadd v5.4s, v5.4s, v30.4s\n"
+    "fadd v4.4s, v4.4s, v27.4s\n"
     "ldr q25, [x23, x25]\n"
-    "fadd v5.4s, v5.4s, v27.4s\n"
-    "fadd v4.4s, v4.4s, v25.4s\n"
+    "fadd v3.4s, v3.4s, v25.4s\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "fmul v7.4s, v7.4s, v8.4s\n"
-    "str q7, [%x[outptr], x28]\n"
-    "fmul v6.4s, v6.4s, v8.4s\n"
-    "add x28, x28, #0x40\n"
-    "fmul v5.4s, v5.4s, v8.4s\n"
-    "str q6, [%x[outptr], x27]\n"
-    "fmul v4.4s, v4.4s, v8.4s\n"
-    "add x27, x27, #0x40\n"
-    "str q5, [%x[outptr], x26]\n"
-    "add x26, x26, #0x40\n"
     "sub %x[n_channels], %x[n_channels], #0x10\n"
-    "str q4, [%x[outptr], x25]\n"
-    "add x25, x25, #0x40\n"
     "cmp %x[n_channels], #0x10\n"
+    "fmul v6.4s, v6.4s, v7.4s\n"
+    "fmul v5.4s, v5.4s, v7.4s\n"
+    "fmul v4.4s, v4.4s, v7.4s\n"
+    "fmul v3.4s, v3.4s, v7.4s\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x40\n"
+    "str q5, [%x[outptr], x27]\n"
+    "add x27, x27, #0x40\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
     "bge 1b\n"
     "cbz %x[n_channels], 25f\n"
     "7:"  // Single vector of channels
     "cmp %x[n_channels], #0x4\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v7.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "fadd v23.4s, v3.4s, v2.4s\n"
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd v19.4s, v1.4s, v0.4s\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "fadd v19.4s, v23.4s, v19.4s\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "fadd v7.4s, v7.4s, v19.4s\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "fadd v23.4s, v3.4s, v2.4s\n"
-    "fadd v19.4s, v1.4s, v0.4s\n"
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
     "fadd v19.4s, v23.4s, v19.4s\n"
-    "fadd v7.4s, v7.4s, v19.4s\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fadd v7.4s, v7.4s, v3.4s\n"
+    "fadd v6.4s, v6.4s, v2.4s\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "fmul v7.4s, v7.4s, v8.4s\n"
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x10\n"
     "sub %x[n_channels], %x[n_channels], #0x4\n"
     "cmp %x[n_channels], #0x4\n"
+    "fmul v6.4s, v6.4s, v7.4s\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 25f\n"
     "14:"  // Oddments
-    "movi v7.16b, #0x0\n"
-    "add %x[outptr], %x[outptr], x28\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 18f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #0, 17f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "b 17f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 1: Unset
     "tbz %x[n_channels], #0, 17f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 1: End
-    "fadd v23.4s, v3.4s, v2.4s\n"
+    "fadd v23.4s, v2.4s, v1.4s\n"
+    "fadd v19.4s, v0.4s, v31.4s\n"
     "subs x24, x24, #0x1\n"
-    "fadd v19.4s, v1.4s, v0.4s\n"
     "fadd v19.4s, v23.4s, v19.4s\n"
-    "fadd v7.4s, v7.4s, v19.4s\n"
+    "fadd v6.4s, v6.4s, v19.4s\n"
     "bgt 15b\n"
     "18:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 22f\n"
     "19:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #1, 20f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #0, 21f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "b 21f\n"
     "20:"  // Oddments: Single input loop: Load: Bit 1: Unset
     "tbz %x[n_channels], #0, 21f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "21:"  // Oddments: Single input loop: Load: Bit 1: End
-    "fadd v7.4s, v7.4s, v3.4s\n"
     "subs x20, x20, #0x1\n"
+    "fadd v6.4s, v6.4s, v2.4s\n"
     "bgt 19b\n"
     "22:"  // Oddments: Single input loop: End
-    "fmul v7.4s, v7.4s, v8.4s\n"
+    "fmul v6.4s, v6.4s, v7.4s\n"
     "tbz %x[n_channels], #1, 23f\n"
-    "st1 { v7.d }[0], [%x[outptr]], #0x8\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #0, 24f\n"
-    "st1 { v7.s }[2], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
     "b 24f\n"
     "23:"  // Oddments: Store: Bit 1: Unset
     "tbz %x[n_channels], #0, 24f\n"
-    "st1 { v7.s }[0], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
     "24:"  // Oddments: Store: Bit 1: End
-
     "25:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 9a22adf..4bf5770 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,27 +29,18 @@
 
 void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst
+struct a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
+  using Parent = DepthfirstStrategy<float, float>;
 
-  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index a924c9a..db01487 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -63,12 +63,12 @@
 
   __asm__ __volatile__(
     "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x14, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x13, #0x0\n"
-    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
     "cmp x15, #0x4\n"
-    "ldp x12, x11, [x20, #0x0]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x13, x12, [x20, #0x0]\n"
+    "mov x11, #0x0\n"
     "ldp x10, x9, [x20, #0x10]\n"
     "ldp x28, x27, [x19, #0x0]\n"
     "ldp x26, x25, [x19, #0x10]\n"
@@ -76,12 +76,12 @@
     "ldp x22, x21, [x19, #0x30]\n"
     "ldr x20, [x19, #0x40]\n"
     "blt 3f\n"
-    "ldr q30, [x27, x14]\n"
     "lsr x19, x15, #0x2\n"
-    "ldr q29, [x24, x14]\n"
     "sub x15, x15, x19, LSL #2\n"
-    "ldr q28, [x21, x14]\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
     "subs x19, x19, #0x1\n"
+    "ldr q28, [x21, x14]\n"
     "ldr q27, [x25, x14]\n"
     "ldr q26, [x28, x14]\n"
     "ldr q25, [x23, x14]\n"
@@ -92,31 +92,31 @@
     "beq 2f\n"
     "1:"  // Vector: Loop
     "fmax v21.4s, v30.4s, v29.4s\n"
-    "ldr q30, [x27, x14]\n"
-    "subs x19, x19, #0x1\n"
     "fmax v20.4s, v29.4s, v28.4s\n"
-    "ldr q29, [x24, x14]\n"
+    "subs x19, x19, #0x1\n"
+    "ldr q30, [x27, x14]\n"
     "fmax v19.4s, v27.4s, v26.4s\n"
-    "ldr q28, [x21, x14]\n"
     "fmax v18.4s, v25.4s, v24.4s\n"
-    "ldr q26, [x28, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
     "fmax v17.4s, v23.4s, v27.4s\n"
-    "ldr q27, [x25, x14]\n"
     "fmax v16.4s, v25.4s, v22.4s\n"
-    "ldr q25, [x23, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
     "fmax v19.4s, v21.4s, v19.4s\n"
+    "fmax v18.4s, v18.4s, v21.4s\n"
+    "ldr q25, [x23, x14]\n"
     "ldr q24, [x26, x14]\n"
-    "fmax v18.4s, v21.4s, v18.4s\n"
-    "ldr q23, [x22, x14]\n"
     "fmax v17.4s, v20.4s, v17.4s\n"
+    "fmax v16.4s, v20.4s, v16.4s\n"
+    "ldr q23, [x22, x14]\n"
     "ldr q22, [x20, x14]\n"
     "add x14, x14, #0x10\n"
-    "fmax v16.4s, v20.4s, v16.4s\n"
-    "str q19, [x12, x13]\n"
-    "str q18, [x11, x13]\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q19, [x13, x11]\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "bgt 1b\n"
     "2:"  // Vector: Tail
     "fmax v21.4s, v30.4s, v29.4s\n"
@@ -126,45 +126,44 @@
     "fmax v17.4s, v23.4s, v27.4s\n"
     "fmax v16.4s, v25.4s, v22.4s\n"
     "fmax v19.4s, v21.4s, v19.4s\n"
-    "str q19, [x12, x13]\n"
-    "fmax v18.4s, v21.4s, v18.4s\n"
+    "fmax v18.4s, v18.4s, v21.4s\n"
+    "str q19, [x13, x11]\n"
     "fmax v17.4s, v20.4s, v17.4s\n"
-    "str q18, [x11, x13]\n"
     "fmax v16.4s, v20.4s, v16.4s\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "cbz x15, 4f\n"
     "3:"  // Oddments
     "ldr s30, [x27, x14]\n"
-    "subs x15, x15, #0x1\n"
     "ldr s29, [x24, x14]\n"
     "fmax v21.4s, v30.4s, v29.4s\n"
+    "subs x15, x15, #0x1\n"
     "ldr s28, [x21, x14]\n"
     "ldr s27, [x25, x14]\n"
     "fmax v20.4s, v29.4s, v28.4s\n"
     "ldr s26, [x28, x14]\n"
     "ldr s25, [x23, x14]\n"
     "fmax v19.4s, v27.4s, v26.4s\n"
+    "fmax v19.4s, v21.4s, v19.4s\n"
     "ldr s24, [x26, x14]\n"
     "ldr s23, [x22, x14]\n"
-    "fmax v19.4s, v21.4s, v19.4s\n"
-    "ldr s22, [x20, x14]\n"
-    "add x14, x14, #0x4\n"
     "fmax v18.4s, v25.4s, v24.4s\n"
-    "str s19, [x12, x13]\n"
     "fmax v17.4s, v23.4s, v27.4s\n"
+    "ldr s22, [x20, x14]\n"
     "fmax v16.4s, v25.4s, v22.4s\n"
-    "fmax v18.4s, v21.4s, v18.4s\n"
-    "str s18, [x11, x13]\n"
+    "add x14, x14, #0x4\n"
+    "fmax v18.4s, v18.4s, v21.4s\n"
     "fmax v17.4s, v20.4s, v17.4s\n"
     "fmax v16.4s, v20.4s, v16.4s\n"
-    "str s17, [x10, x13]\n"
-    "str s16, [x9, x13]\n"
-    "add x13, x13, #0x4\n"
+    "str s19, [x13, x11]\n"
+    "str s18, [x12, x11]\n"
+    "str s17, [x10, x11]\n"
+    "str s16, [x9, x11]\n"
+    "add x11, x11, #0x4\n"
     "bgt 3b\n"
     "4:"  // End
-
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
@@ -173,4 +172,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-#endif // defined(__aarch64__)
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp
index b20ffc2..7577b31 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_fp32_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
 
-struct a64_fp32_nhwc_max_generic_depthfirst
+struct a64_fp32_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = a64_fp32_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<float, float>;
   a64_fp32_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_fp32_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
index e0acb7a..4752057 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,10 +22,11 @@
  * SOFTWARE.
  */
 
-#include <cstdint>
-
 #if defined(__aarch64__)
 
+#include <cstdint>
+#include <cstddef>
+
 namespace arm_conv {
 namespace pooling {
 
@@ -39,31 +40,31 @@
 )
 {
   __asm__ __volatile__(
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x10\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "mov w20, #0xff800000\n"
-    "dup v7.4s, w20\n"
-    "mov x19, %x[inptrs]\n"
-    "dup v6.4s, w20\n"
+    "mov w19, #0xff800000\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
-    "dup v5.4s, w20\n"
-    "dup v4.4s, w20\n"
+    "dup v6.4s, w19\n"
+    "dup v5.4s, w19\n"
+    "dup v4.4s, w19\n"
+    "dup v3.4s, w19\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -76,47 +77,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "fmax v23.4s, v3.4s, v2.4s\n"
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fmax v19.4s, v1.4s, v0.4s\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "fmax v22.4s, v31.4s, v30.4s\n"
-    "ldr q3, [x23, x28]\n"
+    "fmax v22.4s, v30.4s, v22.4s\n"
     "fmax v18.4s, v29.4s, v28.4s\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "fmax v21.4s, v27.4s, v21.4s\n"
-    "ldr q2, [x22, x28]\n"
     "fmax v17.4s, v26.4s, v17.4s\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "fmax v20.4s, v25.4s, v20.4s\n"
-    "ldr q0, [x20, x28]\n"
     "fmax v16.4s, v24.4s, v16.4s\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "fmax v19.4s, v23.4s, v19.4s\n"
-    "ldr q30, [x22, x27]\n"
     "fmax v18.4s, v22.4s, v18.4s\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "fmax v17.4s, v21.4s, v17.4s\n"
-    "ldr q28, [x20, x27]\n"
     "fmax v16.4s, v20.4s, v16.4s\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "fmax v5.4s, v5.4s, v18.4s\n"
     "ldr q27, [x23, x26]\n"
-    "fmax v7.4s, v7.4s, v19.4s\n"
     "ldr q21, [x22, x26]\n"
-    "fmax v6.4s, v6.4s, v18.4s\n"
+    "fmax v4.4s, v4.4s, v17.4s\n"
+    "fmax v3.4s, v3.4s, v16.4s\n"
     "ldr q26, [x21, x26]\n"
-    "fmax v5.4s, v5.4s, v17.4s\n"
     "ldr q17, [x20, x26]\n"
-    "fmax v4.4s, v4.4s, v16.4s\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "fmax v23.4s, v3.4s, v2.4s\n"
-    "fmax v19.4s, v1.4s, v0.4s\n"
-    "fmax v22.4s, v31.4s, v30.4s\n"
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
+    "fmax v22.4s, v30.4s, v22.4s\n"
     "fmax v18.4s, v29.4s, v28.4s\n"
     "fmax v21.4s, v27.4s, v21.4s\n"
     "fmax v17.4s, v26.4s, v17.4s\n"
@@ -126,36 +127,36 @@
     "fmax v18.4s, v22.4s, v18.4s\n"
     "fmax v17.4s, v21.4s, v17.4s\n"
     "fmax v16.4s, v20.4s, v16.4s\n"
-    "fmax v7.4s, v7.4s, v19.4s\n"
-    "fmax v6.4s, v6.4s, v18.4s\n"
-    "fmax v5.4s, v5.4s, v17.4s\n"
-    "fmax v4.4s, v4.4s, v16.4s\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "fmax v5.4s, v5.4s, v18.4s\n"
+    "fmax v4.4s, v4.4s, v17.4s\n"
+    "fmax v3.4s, v3.4s, v16.4s\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fmax v7.4s, v7.4s, v3.4s\n"
-    "ldr q31, [x23, x27]\n"
+    "fmax v6.4s, v6.4s, v2.4s\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "fmax v6.4s, v6.4s, v31.4s\n"
+    "fmax v5.4s, v5.4s, v30.4s\n"
+    "fmax v4.4s, v4.4s, v27.4s\n"
     "ldr q25, [x23, x25]\n"
-    "fmax v5.4s, v5.4s, v27.4s\n"
-    "fmax v4.4s, v4.4s, v25.4s\n"
+    "fmax v3.4s, v3.4s, v25.4s\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x40\n"
-    "str q6, [%x[outptr], x27]\n"
-    "add x27, x27, #0x40\n"
-    "str q5, [%x[outptr], x26]\n"
-    "add x26, x26, #0x40\n"
-    "str q4, [%x[outptr], x25]\n"
-    "add x25, x25, #0x40\n"
     "sub %x[n_channels], %x[n_channels], #0x10\n"
     "cmp %x[n_channels], #0x10\n"
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
     "bge 1b\n"
     "cbz %x[n_channels], 25f\n"
     "7:"  // Single vector of channels
@@ -163,136 +164,133 @@
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
     "mov w19, #0xff800000\n"
-    "dup v7.4s, w19\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "dup v6.4s, w19\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "fmax v23.4s, v3.4s, v2.4s\n"
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fmax v19.4s, v1.4s, v0.4s\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "fmax v19.4s, v23.4s, v19.4s\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "fmax v7.4s, v7.4s, v19.4s\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "fmax v23.4s, v3.4s, v2.4s\n"
-    "fmax v19.4s, v1.4s, v0.4s\n"
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
     "fmax v19.4s, v23.4s, v19.4s\n"
-    "fmax v7.4s, v7.4s, v19.4s\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "fmax v7.4s, v7.4s, v3.4s\n"
+    "fmax v6.4s, v6.4s, v2.4s\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x10\n"
     "sub %x[n_channels], %x[n_channels], #0x4\n"
     "cmp %x[n_channels], #0x4\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 25f\n"
     "14:"  // Oddments
-    "add %x[outptr], %x[outptr], x28\n"
     "mov w19, #0xff800000\n"
-    "dup v7.4s, w19\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "dup v6.4s, w19\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 18f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #0, 17f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "b 17f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 1: Unset
     "tbz %x[n_channels], #0, 17f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 1: End
-    "fmax v23.4s, v3.4s, v2.4s\n"
+    "fmax v23.4s, v2.4s, v1.4s\n"
+    "fmax v19.4s, v0.4s, v31.4s\n"
     "subs x24, x24, #0x1\n"
-    "fmax v19.4s, v1.4s, v0.4s\n"
     "fmax v19.4s, v23.4s, v19.4s\n"
-    "fmax v7.4s, v7.4s, v19.4s\n"
+    "fmax v6.4s, v6.4s, v19.4s\n"
     "bgt 15b\n"
     "18:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 22f\n"
     "19:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #1, 20f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #0, 21f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "b 21f\n"
     "20:"  // Oddments: Single input loop: Load: Bit 1: Unset
     "tbz %x[n_channels], #0, 21f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "21:"  // Oddments: Single input loop: Load: Bit 1: End
-    "fmax v7.4s, v7.4s, v3.4s\n"
     "subs x20, x20, #0x1\n"
+    "fmax v6.4s, v6.4s, v2.4s\n"
     "bgt 19b\n"
     "22:"  // Oddments: Single input loop: End
     "tbz %x[n_channels], #1, 23f\n"
-    "st1 { v7.d }[0], [%x[outptr]], #0x8\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #0, 24f\n"
-    "st1 { v7.s }[2], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
     "b 24f\n"
     "23:"  // Oddments: Store: Bit 1: Unset
     "tbz %x[n_channels], #0, 24f\n"
-    "st1 { v7.s }[0], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
     "24:"  // Oddments: Store: Bit 1: End
-
     "25:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp
index df66ab7..de94ec0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_s8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
 
-struct a64_s8_nhwc_avg_generic_depthfirst
+struct a64_s8_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = a64_s8_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t>;
   a64_s8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_s8_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
index 405ae66..8d6d73a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,13 +22,14 @@
  * SOFTWARE.
  */
 
+#if defined(__aarch64__)
+
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(__aarch64__)
-
 namespace arm_conv {
 namespace pooling {
 
@@ -83,27 +84,28 @@
       shift_value--;
       f_rescale_value *= 2.0f;
     }
-    int64_t large_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
-    if (large_rescale_value == (1ll << 31))
+
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      large_rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
-    rescale_value = static_cast<int32_t>(large_rescale_value);
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
   __asm__ __volatile__(
-    "mov x26, #0x0\n"
-    "mov x25, #0x10\n" // cntb _, ALL, #1
-    "mov x24, #0x20\n" // cntb _, ALL, #2
-    "mov x23, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n"  // cntb _, ALL, #1
+    "mov x24, #0x20\n"  // cntb _, ALL, #2
+    "mov x23, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v15.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v14.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
     "movi v11.4s, #0x0\n"
@@ -120,10 +122,10 @@
     "movi v0.4s, #0x0\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "ldr q29, [x21, x25]\n"
     "ldr q28, [x20, x25]\n"
     "ldr q27, [x21, x24]\n"
@@ -133,28 +135,28 @@
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     "saddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "saddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "saddl v21.8h, v29.8b, v28.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "saddl v21.8h, v29.8b, v28.8b\n"
     "saddl2 v20.8h, v29.16b, v28.16b\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "saddl v19.8h, v27.8b, v26.8b\n"
-    "ldr q29, [x21, x25]\n"
     "saddl2 v18.8h, v27.16b, v26.16b\n"
-    "ldr q28, [x20, x25]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
     "saddl v17.8h, v25.8b, v24.8b\n"
-    "ldr q27, [x21, x24]\n"
     "saddl2 v16.8h, v25.16b, v24.16b\n"
-    "ldr q26, [x20, x24]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
-    "ldr q25, [x21, x23]\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q24, [x20, x23]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q24, [x20, x23]\n"
     "saddw v11.4s, v11.4s, v21.4h\n"
     "saddw2 v10.4s, v10.4s, v21.8h\n"
     "saddw v9.4s, v9.4s, v20.4h\n"
@@ -198,17 +200,17 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "sxtl v23.8h, v31.8b\n"
-    "ldr q29, [x21, x25]\n"
     "sxtl2 v22.8h, v31.16b\n"
+    "ldr q29, [x21, x25]\n"
     "ldr q27, [x21, x24]\n"
-    "ldr q25, [x21, x23]\n"
     "sxtl v21.8h, v29.8b\n"
     "sxtl2 v20.8h, v29.16b\n"
+    "ldr q25, [x21, x23]\n"
     "sxtl v19.8h, v27.8b\n"
     "sxtl2 v18.8h, v27.16b\n"
+    "subs x20, x20, #0x1\n"
     "sxtl v17.8h, v25.8b\n"
     "sxtl2 v16.8h, v25.16b\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
@@ -229,91 +231,91 @@
     "saddw2 v0.4s, v0.4s, v16.8h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "movi v19.4s, #0x7f\n"
-    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
+    "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "sqdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqdmulh v12.4s, v12.4s, v17.4s\n"
     "sub %x[n_channels], %x[n_channels], #0x40\n"
-    "sqdmulh v15.4s, v15.4s, v18.4s\n"
-    "ld1r { v17.4s }, [%x[shift_ptr]]\n"
-    "not v16.16b, v19.16b\n"
-    "sqdmulh v14.4s, v14.4s, v18.4s\n"
     "cmp %x[n_channels], #0x40\n"
-    "sqdmulh v13.4s, v13.4s, v18.4s\n"
-    "sqdmulh v12.4s, v12.4s, v18.4s\n"
-    "sqdmulh v11.4s, v11.4s, v18.4s\n"
-    "sqdmulh v10.4s, v10.4s, v18.4s\n"
-    "sqdmulh v9.4s, v9.4s, v18.4s\n"
-    "srshl v15.4s, v15.4s, v17.4s\n"
-    "srshl v14.4s, v14.4s, v17.4s\n"
-    "srshl v13.4s, v13.4s, v17.4s\n"
-    "srshl v12.4s, v12.4s, v17.4s\n"
-    "srshl v11.4s, v11.4s, v17.4s\n"
-    "srshl v10.4s, v10.4s, v17.4s\n"
-    "srshl v9.4s, v9.4s, v17.4s\n"
-    "sqdmulh v8.4s, v8.4s, v18.4s\n"
-    "sqdmulh v7.4s, v7.4s, v18.4s\n"
-    "sqdmulh v6.4s, v6.4s, v18.4s\n"
-    "sqdmulh v5.4s, v5.4s, v18.4s\n"
-    "srshl v8.4s, v8.4s, v17.4s\n"
-    "srshl v7.4s, v7.4s, v17.4s\n"
-    "srshl v6.4s, v6.4s, v17.4s\n"
-    "srshl v5.4s, v5.4s, v17.4s\n"
-    "sqdmulh v4.4s, v4.4s, v18.4s\n"
-    "sqdmulh v3.4s, v3.4s, v18.4s\n"
-    "sqdmulh v2.4s, v2.4s, v18.4s\n"
-    "sqdmulh v1.4s, v1.4s, v18.4s\n"
-    "srshl v4.4s, v4.4s, v17.4s\n"
-    "srshl v3.4s, v3.4s, v17.4s\n"
-    "srshl v2.4s, v2.4s, v17.4s\n"
-    "srshl v1.4s, v1.4s, v17.4s\n"
-    "sqdmulh v0.4s, v0.4s, v18.4s\n"
+    "sqdmulh v11.4s, v11.4s, v17.4s\n"
+    "sqdmulh v10.4s, v10.4s, v17.4s\n"
+    "sqdmulh v9.4s, v9.4s, v17.4s\n"
+    "sqdmulh v8.4s, v8.4s, v17.4s\n"
+    "sqdmulh v7.4s, v7.4s, v17.4s\n"
+    "sqdmulh v6.4s, v6.4s, v17.4s\n"
+    "sqdmulh v5.4s, v5.4s, v17.4s\n"
+    "sqdmulh v4.4s, v4.4s, v17.4s\n"
+    "sqdmulh v3.4s, v3.4s, v17.4s\n"
+    "sqdmulh v2.4s, v2.4s, v17.4s\n"
+    "sqdmulh v1.4s, v1.4s, v17.4s\n"
+    "sqdmulh v0.4s, v0.4s, v17.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "srshl v11.4s, v11.4s, v16.4s\n"
+    "srshl v10.4s, v10.4s, v16.4s\n"
+    "srshl v9.4s, v9.4s, v16.4s\n"
+    "srshl v8.4s, v8.4s, v16.4s\n"
+    "srshl v7.4s, v7.4s, v16.4s\n"
+    "srshl v6.4s, v6.4s, v16.4s\n"
+    "srshl v5.4s, v5.4s, v16.4s\n"
+    "srshl v4.4s, v4.4s, v16.4s\n"
+    "srshl v3.4s, v3.4s, v16.4s\n"
+    "srshl v2.4s, v2.4s, v16.4s\n"
+    "srshl v1.4s, v1.4s, v16.4s\n"
+    "srshl v0.4s, v0.4s, v16.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v15.4s, v15.4s, v16.4s\n"
     "smax v14.4s, v14.4s, v16.4s\n"
     "smax v13.4s, v13.4s, v16.4s\n"
-    "srshl v0.4s, v0.4s, v17.4s\n"
-    "smin v15.4s, v15.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v19.4s\n"
-    "smin v13.4s, v13.4s, v19.4s\n"
     "smax v12.4s, v12.4s, v16.4s\n"
     "smax v11.4s, v11.4s, v16.4s\n"
     "smax v10.4s, v10.4s, v16.4s\n"
-    "smin v12.4s, v12.4s, v19.4s\n"
-    "smin v11.4s, v11.4s, v19.4s\n"
-    "smin v10.4s, v10.4s, v19.4s\n"
     "smax v9.4s, v9.4s, v16.4s\n"
     "smax v8.4s, v8.4s, v16.4s\n"
     "smax v7.4s, v7.4s, v16.4s\n"
-    "smin v9.4s, v9.4s, v19.4s\n"
-    "smin v8.4s, v8.4s, v19.4s\n"
-    "smin v7.4s, v7.4s, v19.4s\n"
     "smax v6.4s, v6.4s, v16.4s\n"
     "smax v5.4s, v5.4s, v16.4s\n"
     "smax v4.4s, v4.4s, v16.4s\n"
-    "smin v6.4s, v6.4s, v19.4s\n"
-    "smin v5.4s, v5.4s, v19.4s\n"
-    "smin v4.4s, v4.4s, v19.4s\n"
     "smax v3.4s, v3.4s, v16.4s\n"
     "smax v2.4s, v2.4s, v16.4s\n"
     "smax v1.4s, v1.4s, v16.4s\n"
-    "smin v3.4s, v3.4s, v19.4s\n"
-    "smin v2.4s, v2.4s, v19.4s\n"
-    "smin v1.4s, v1.4s, v19.4s\n"
     "smax v0.4s, v0.4s, v16.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
+    "smin v11.4s, v11.4s, v17.4s\n"
+    "smin v10.4s, v10.4s, v17.4s\n"
+    "smin v9.4s, v9.4s, v17.4s\n"
+    "smin v8.4s, v8.4s, v17.4s\n"
+    "smin v7.4s, v7.4s, v17.4s\n"
+    "smin v6.4s, v6.4s, v17.4s\n"
+    "smin v5.4s, v5.4s, v17.4s\n"
+    "smin v4.4s, v4.4s, v17.4s\n"
+    "smin v3.4s, v3.4s, v17.4s\n"
+    "smin v2.4s, v2.4s, v17.4s\n"
+    "smin v1.4s, v1.4s, v17.4s\n"
+    "smin v0.4s, v0.4s, v17.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
-    "smin v0.4s, v0.4s, v19.4s\n"
     "uzp1 v22.16b, v11.16b, v10.16b\n"
-    "uzp1 v21.16b, v9.16b, v8.16b\n"
-    "uzp1 v20.16b, v7.16b, v6.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
     "uzp1 v17.16b, v5.16b, v4.16b\n"
-    "uzp1 v19.16b, v3.16b, v2.16b\n"
-    "uzp1 v18.16b, v1.16b, v0.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
     "str q16, [%x[outptr], x26]\n"
-    "uzp1 v16.16b, v22.16b, v21.16b\n"
     "add x26, x26, #0x40\n"
-    "uzp1 v17.16b, v20.16b, v17.16b\n"
-    "str q16, [%x[outptr], x25]\n"
-    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
     "add x25, x25, #0x40\n"
     "str q17, [%x[outptr], x24]\n"
     "add x24, x24, #0x40\n"
@@ -325,31 +327,31 @@
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v15.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v14.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     "saddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "saddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "saddw v15.4s, v15.4s, v23.4h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q30, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     "saddl v23.8h, v31.8b, v30.8b\n"
@@ -363,38 +365,38 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "sxtl v23.8h, v31.8b\n"
     "sxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "movi v19.4s, #0x7f\n"
-    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
+    "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
+    "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "sqdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqdmulh v12.4s, v12.4s, v17.4s\n"
     "sub %x[n_channels], %x[n_channels], #0x10\n"
-    "sqdmulh v15.4s, v15.4s, v18.4s\n"
-    "ld1r { v17.4s }, [%x[shift_ptr]]\n"
-    "not v16.16b, v19.16b\n"
-    "sqdmulh v14.4s, v14.4s, v18.4s\n"
     "cmp %x[n_channels], #0x10\n"
-    "sqdmulh v13.4s, v13.4s, v18.4s\n"
-    "sqdmulh v12.4s, v12.4s, v18.4s\n"
-    "srshl v15.4s, v15.4s, v17.4s\n"
-    "srshl v14.4s, v14.4s, v17.4s\n"
-    "srshl v13.4s, v13.4s, v17.4s\n"
-    "srshl v12.4s, v12.4s, v17.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v15.4s, v15.4s, v16.4s\n"
     "smax v14.4s, v14.4s, v16.4s\n"
     "smax v13.4s, v13.4s, v16.4s\n"
     "smax v12.4s, v12.4s, v16.4s\n"
-    "smin v15.4s, v15.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v19.4s\n"
-    "smin v13.4s, v13.4s, v19.4s\n"
-    "smin v12.4s, v12.4s, v19.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
@@ -403,21 +405,21 @@
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v15.4s, #0x0\n"
-    "add %x[outptr], %x[outptr], x26\n"
-    "movi v14.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v13.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x22, 24f\n"
     "15:"  // Oddments: 2 inputs loop
-    "movi v31.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x0]\n"
     "add x19, x19, #0x10\n"
-    "movi v30.16b, #0x0\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "add x20, x20, x26\n"
+    "movi v30.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
     "ldr d31, [x21], #0x8\n"
     "ldr d30, [x20], #0x8\n"
@@ -479,8 +481,8 @@
     "ldr b30, [x20], #0x1\n"
     "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
     "saddl v23.8h, v31.8b, v30.8b\n"
-    "subs x22, x22, #0x1\n"
     "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
@@ -490,9 +492,9 @@
     "ands x20, %x[n_valid_cells], #0x1\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v31.16b, #0x0\n"
     "ldr x21, [x19], #0x8\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
     "ldr d31, [x21], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
@@ -539,34 +541,34 @@
     "ldr b31, [x21], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
     "sxtl v23.8h, v31.8b\n"
-    "subs x20, x20, #0x1\n"
     "sxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
-    "movi v19.4s, #0x7f\n"
-    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
-    "not v16.16b, v19.16b\n"
-    "sqdmulh v15.4s, v15.4s, v18.4s\n"
-    "ld1r { v17.4s }, [%x[shift_ptr]]\n"
-    "sqdmulh v14.4s, v14.4s, v18.4s\n"
-    "sqdmulh v13.4s, v13.4s, v18.4s\n"
-    "sqdmulh v12.4s, v12.4s, v18.4s\n"
-    "srshl v15.4s, v15.4s, v17.4s\n"
-    "srshl v14.4s, v14.4s, v17.4s\n"
-    "srshl v13.4s, v13.4s, v17.4s\n"
-    "srshl v12.4s, v12.4s, v17.4s\n"
+    "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
+    "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "sqdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqdmulh v12.4s, v12.4s, v17.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v15.4s, v15.4s, v16.4s\n"
     "smax v14.4s, v14.4s, v16.4s\n"
     "smax v13.4s, v13.4s, v16.4s\n"
     "smax v12.4s, v12.4s, v16.4s\n"
-    "smin v15.4s, v15.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v19.4s\n"
-    "smin v13.4s, v13.4s, v19.4s\n"
-    "smin v12.4s, v12.4s, v19.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
@@ -615,9 +617,7 @@
     "tbz %x[n_channels], #0, 42f\n"
     "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
     : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
@@ -626,5 +626,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 7829ecc..234b444 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,27 +29,18 @@
 
 void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst
+struct a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<int8_t, int8_t>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
+  using Parent = DepthfirstStrategy<int8_t, int8_t>;
 
-  typedef void (*kern_type)(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index e344e14..1767e5c 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -63,12 +63,12 @@
 
   __asm__ __volatile__(
     "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x14, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x13, #0x0\n"
-    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
     "cmp x15, #0x10\n"
-    "ldp x12, x11, [x20, #0x0]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x13, x12, [x20, #0x0]\n"
+    "mov x11, #0x0\n"
     "ldp x10, x9, [x20, #0x10]\n"
     "ldp x28, x27, [x19, #0x0]\n"
     "ldp x26, x25, [x19, #0x10]\n"
@@ -76,12 +76,12 @@
     "ldp x22, x21, [x19, #0x30]\n"
     "ldr x20, [x19, #0x40]\n"
     "blt 3f\n"
-    "ldr q30, [x27, x14]\n"
     "lsr x19, x15, #0x4\n"
-    "ldr q29, [x24, x14]\n"
     "sub x15, x15, x19, LSL #4\n"
-    "ldr q28, [x21, x14]\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
     "subs x19, x19, #0x1\n"
+    "ldr q28, [x21, x14]\n"
     "ldr q27, [x25, x14]\n"
     "ldr q26, [x28, x14]\n"
     "ldr q25, [x23, x14]\n"
@@ -92,31 +92,31 @@
     "beq 2f\n"
     "1:"  // Vector: Loop
     "smax v21.16b, v30.16b, v29.16b\n"
-    "ldr q30, [x27, x14]\n"
-    "subs x19, x19, #0x1\n"
     "smax v20.16b, v29.16b, v28.16b\n"
-    "ldr q29, [x24, x14]\n"
+    "subs x19, x19, #0x1\n"
+    "ldr q30, [x27, x14]\n"
     "smax v19.16b, v27.16b, v26.16b\n"
-    "ldr q28, [x21, x14]\n"
     "smax v18.16b, v25.16b, v24.16b\n"
-    "ldr q26, [x28, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
     "smax v17.16b, v23.16b, v27.16b\n"
-    "ldr q27, [x25, x14]\n"
     "smax v16.16b, v25.16b, v22.16b\n"
-    "ldr q25, [x23, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
     "smax v19.16b, v21.16b, v19.16b\n"
+    "smax v18.16b, v18.16b, v21.16b\n"
+    "ldr q25, [x23, x14]\n"
     "ldr q24, [x26, x14]\n"
-    "smax v18.16b, v21.16b, v18.16b\n"
-    "ldr q23, [x22, x14]\n"
     "smax v17.16b, v20.16b, v17.16b\n"
+    "smax v16.16b, v20.16b, v16.16b\n"
+    "ldr q23, [x22, x14]\n"
     "ldr q22, [x20, x14]\n"
     "add x14, x14, #0x10\n"
-    "smax v16.16b, v20.16b, v16.16b\n"
-    "str q19, [x12, x13]\n"
-    "str q18, [x11, x13]\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q19, [x13, x11]\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "bgt 1b\n"
     "2:"  // Vector: Tail
     "smax v21.16b, v30.16b, v29.16b\n"
@@ -126,45 +126,44 @@
     "smax v17.16b, v23.16b, v27.16b\n"
     "smax v16.16b, v25.16b, v22.16b\n"
     "smax v19.16b, v21.16b, v19.16b\n"
-    "str q19, [x12, x13]\n"
-    "smax v18.16b, v21.16b, v18.16b\n"
+    "smax v18.16b, v18.16b, v21.16b\n"
+    "str q19, [x13, x11]\n"
     "smax v17.16b, v20.16b, v17.16b\n"
-    "str q18, [x11, x13]\n"
     "smax v16.16b, v20.16b, v16.16b\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "cbz x15, 4f\n"
     "3:"  // Oddments
     "ldr b30, [x27, x14]\n"
-    "subs x15, x15, #0x1\n"
     "ldr b29, [x24, x14]\n"
     "smax v21.16b, v30.16b, v29.16b\n"
+    "subs x15, x15, #0x1\n"
     "ldr b28, [x21, x14]\n"
     "ldr b27, [x25, x14]\n"
     "smax v20.16b, v29.16b, v28.16b\n"
     "ldr b26, [x28, x14]\n"
     "ldr b25, [x23, x14]\n"
     "smax v19.16b, v27.16b, v26.16b\n"
+    "smax v19.16b, v21.16b, v19.16b\n"
     "ldr b24, [x26, x14]\n"
     "ldr b23, [x22, x14]\n"
-    "smax v19.16b, v21.16b, v19.16b\n"
-    "ldr b22, [x20, x14]\n"
-    "add x14, x14, #0x1\n"
     "smax v18.16b, v25.16b, v24.16b\n"
-    "str b19, [x12, x13]\n"
     "smax v17.16b, v23.16b, v27.16b\n"
+    "ldr b22, [x20, x14]\n"
     "smax v16.16b, v25.16b, v22.16b\n"
-    "smax v18.16b, v21.16b, v18.16b\n"
-    "str b18, [x11, x13]\n"
+    "add x14, x14, #0x1\n"
+    "smax v18.16b, v18.16b, v21.16b\n"
     "smax v17.16b, v20.16b, v17.16b\n"
     "smax v16.16b, v20.16b, v16.16b\n"
-    "str b17, [x10, x13]\n"
-    "str b16, [x9, x13]\n"
-    "add x13, x13, #0x1\n"
+    "str b19, [x13, x11]\n"
+    "str b18, [x12, x11]\n"
+    "str b17, [x10, x11]\n"
+    "str b16, [x9, x11]\n"
+    "add x11, x11, #0x1\n"
     "bgt 3b\n"
     "4:"  // End
-
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
@@ -173,4 +172,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-#endif // defined(__aarch64__)
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp
index 6c4cd14..ba6d52f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_s8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
 
-struct a64_s8_nhwc_max_generic_depthfirst
+struct a64_s8_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = a64_s8_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t>;
   a64_s8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_s8_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
index 5e4c84d..9bf3136 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,10 +22,11 @@
  * SOFTWARE.
  */
 
-#include <cstdint>
-
 #if defined(__aarch64__)
 
+#include <cstdint>
+#include <cstddef>
+
 namespace arm_conv {
 namespace pooling {
 
@@ -39,30 +40,30 @@
 )
 {
   __asm__ __volatile__(
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v7.16b, #0x80\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v6.16b, #0x80\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x80\n"
     "movi v5.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "movi v4.16b, #0x80\n"
+    "movi v3.16b, #0x80\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -75,47 +76,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "smax v23.16b, v3.16b, v2.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "smax v22.16b, v31.16b, v30.16b\n"
-    "ldr q3, [x23, x28]\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
     "smax v18.16b, v29.16b, v28.16b\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "smax v21.16b, v27.16b, v21.16b\n"
-    "ldr q2, [x22, x28]\n"
     "smax v17.16b, v26.16b, v17.16b\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "smax v20.16b, v25.16b, v20.16b\n"
-    "ldr q0, [x20, x28]\n"
     "smax v16.16b, v24.16b, v16.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "ldr q30, [x22, x27]\n"
     "smax v18.16b, v22.16b, v18.16b\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "smax v17.16b, v21.16b, v17.16b\n"
-    "ldr q28, [x20, x27]\n"
     "smax v16.16b, v20.16b, v16.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "smax v5.16b, v5.16b, v18.16b\n"
     "ldr q27, [x23, x26]\n"
-    "smax v7.16b, v7.16b, v19.16b\n"
     "ldr q21, [x22, x26]\n"
-    "smax v6.16b, v6.16b, v18.16b\n"
+    "smax v4.16b, v4.16b, v17.16b\n"
+    "smax v3.16b, v3.16b, v16.16b\n"
     "ldr q26, [x21, x26]\n"
-    "smax v5.16b, v5.16b, v17.16b\n"
     "ldr q17, [x20, x26]\n"
-    "smax v4.16b, v4.16b, v16.16b\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "smax v23.16b, v3.16b, v2.16b\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
-    "smax v22.16b, v31.16b, v30.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
     "smax v18.16b, v29.16b, v28.16b\n"
     "smax v21.16b, v27.16b, v21.16b\n"
     "smax v17.16b, v26.16b, v17.16b\n"
@@ -125,315 +126,312 @@
     "smax v18.16b, v22.16b, v18.16b\n"
     "smax v17.16b, v21.16b, v17.16b\n"
     "smax v16.16b, v20.16b, v16.16b\n"
-    "smax v7.16b, v7.16b, v19.16b\n"
-    "smax v6.16b, v6.16b, v18.16b\n"
-    "smax v5.16b, v5.16b, v17.16b\n"
-    "smax v4.16b, v4.16b, v16.16b\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "smax v5.16b, v5.16b, v18.16b\n"
+    "smax v4.16b, v4.16b, v17.16b\n"
+    "smax v3.16b, v3.16b, v16.16b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "smax v7.16b, v7.16b, v3.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "smax v6.16b, v6.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "smax v6.16b, v6.16b, v31.16b\n"
+    "smax v5.16b, v5.16b, v30.16b\n"
+    "smax v4.16b, v4.16b, v27.16b\n"
     "ldr q25, [x23, x25]\n"
-    "smax v5.16b, v5.16b, v27.16b\n"
-    "smax v4.16b, v4.16b, v25.16b\n"
+    "smax v3.16b, v3.16b, v25.16b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x40\n"
-    "str q6, [%x[outptr], x27]\n"
-    "add x27, x27, #0x40\n"
-    "str q5, [%x[outptr], x26]\n"
-    "add x26, x26, #0x40\n"
-    "str q4, [%x[outptr], x25]\n"
-    "add x25, x25, #0x40\n"
     "sub %x[n_channels], %x[n_channels], #0x40\n"
     "cmp %x[n_channels], #0x40\n"
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
     "bge 1b\n"
     "cbz %x[n_channels], 43f\n"
     "7:"  // Single vector of channels
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v7.16b, #0x80\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "smax v23.16b, v3.16b, v2.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "smax v7.16b, v7.16b, v19.16b\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "smax v23.16b, v3.16b, v2.16b\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "smax v7.16b, v7.16b, v19.16b\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "smax v7.16b, v7.16b, v3.16b\n"
+    "smax v6.16b, v6.16b, v2.16b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x10\n"
     "sub %x[n_channels], %x[n_channels], #0x10\n"
     "cmp %x[n_channels], #0x10\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v7.16b, #0x80\n"
-    "add %x[outptr], %x[outptr], x28\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "movi v6.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 24f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #2, 17f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
-    "ld1 { v2.h }[6], [x22], #0x2\n"
-    "ld1 { v1.h }[6], [x21], #0x2\n"
-    "ld1 { v0.h }[6], [x20], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
-    "ld1 { v2.b }[14], [x22], #0x1\n"
-    "ld1 { v1.b }[14], [x21], #0x1\n"
-    "ld1 { v0.b }[14], [x20], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
     "b 23f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
-    "ld1 { v2.b }[12], [x22], #0x1\n"
-    "ld1 { v1.b }[12], [x21], #0x1\n"
-    "ld1 { v0.b }[12], [x20], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
     "b 23f\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 18f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
-    "ld1 { v2.h }[4], [x22], #0x2\n"
-    "ld1 { v1.h }[4], [x21], #0x2\n"
-    "ld1 { v0.h }[4], [x20], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
-    "ld1 { v2.b }[10], [x22], #0x1\n"
-    "ld1 { v1.b }[10], [x21], #0x1\n"
-    "ld1 { v0.b }[10], [x20], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
     "b 23f\n"
     "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
-    "ld1 { v2.b }[8], [x22], #0x1\n"
-    "ld1 { v1.b }[8], [x21], #0x1\n"
-    "ld1 { v0.b }[8], [x20], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
     "b 23f\n"
     "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 21f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "tbz %x[n_channels], #1, 20f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
-    "ld1 { v2.h }[2], [x22], #0x2\n"
-    "ld1 { v1.h }[2], [x21], #0x2\n"
-    "ld1 { v0.h }[2], [x20], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
-    "ld1 { v2.b }[6], [x22], #0x1\n"
-    "ld1 { v1.b }[6], [x21], #0x1\n"
-    "ld1 { v0.b }[6], [x20], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
     "b 23f\n"
     "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
-    "ld1 { v2.b }[4], [x22], #0x1\n"
-    "ld1 { v1.b }[4], [x21], #0x1\n"
-    "ld1 { v0.b }[4], [x20], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
     "b 23f\n"
     "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 22f\n"
-    "ldr h3, [x23], #0x2\n"
-    "ldr h2, [x22], #0x2\n"
-    "ldr h1, [x21], #0x2\n"
-    "ldr h0, [x20], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
-    "ld1 { v2.b }[2], [x22], #0x1\n"
-    "ld1 { v1.b }[2], [x21], #0x1\n"
-    "ld1 { v0.b }[2], [x20], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
     "b 23f\n"
     "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ldr b3, [x23], #0x1\n"
-    "ldr b2, [x22], #0x1\n"
-    "ldr b1, [x21], #0x1\n"
-    "ldr b0, [x20], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
     "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
-    "smax v23.16b, v3.16b, v2.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "subs x24, x24, #0x1\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "smax v7.16b, v7.16b, v19.16b\n"
+    "smax v6.16b, v6.16b, v19.16b\n"
     "bgt 15b\n"
     "24:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "tbz %x[n_channels], #1, 26f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
     "b 33f\n"
     "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
     "b 33f\n"
     "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 28f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
     "b 33f\n"
     "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
     "b 33f\n"
     "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 31f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "tbz %x[n_channels], #1, 30f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
     "b 33f\n"
     "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
     "b 33f\n"
     "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 32f\n"
-    "ldr h3, [x23], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
     "b 33f\n"
     "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ldr b3, [x23], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
-    "smax v7.16b, v7.16b, v3.16b\n"
     "subs x20, x20, #0x1\n"
+    "smax v6.16b, v6.16b, v2.16b\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
     "tbz %x[n_channels], #3, 38f\n"
-    "st1 { v7.d }[0], [%x[outptr]], #0x8\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #2, 36f\n"
-    "st1 { v7.s }[2], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #1, 35f\n"
-    "st1 { v7.h }[6], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[14], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[14], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[12], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[12], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 37f\n"
-    "st1 { v7.h }[4], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[10], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[10], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[8], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[8], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "38:"  // Oddments: Store: Bit 3: Unset
     "tbz %x[n_channels], #2, 40f\n"
-    "st1 { v7.s }[0], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #1, 39f\n"
-    "st1 { v7.h }[2], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[6], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[6], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[4], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[4], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 41f\n"
-    "st1 { v7.h }[0], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[2], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[2], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[0], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
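
Note: the generic.cpp changes above only reschedule loads around the smax chain and renumber registers; the result is unchanged. For reference, a scalar sketch of what this generic int8 max kernel produces, assuming the argument meaning given by the impl signature (illustrative only, not library code):

  #include <algorithm>
  #include <cstdint>

  // Per channel, take the maximum over all valid input cells. The accumulator
  // starts at INT8_MIN, matching the vector initialisation "movi v.., #0x80"
  // in the assembly above.
  void s8_max_generic_reference(uint64_t n_valid_cells, uint64_t n_channels,
                                const int8_t *const *inptrs, int8_t *outptr)
  {
    for (uint64_t c = 0; c < n_channels; c++)
    {
      int8_t acc = INT8_MIN;
      for (uint64_t i = 0; i < n_valid_cells; i++)
      {
        acc = std::max(acc, inptrs[i][c]);
      }
      outptr[c] = acc;
    }
  }
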
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp
index a50e99a..d5d7313 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_s8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
 
-struct a64_s8q_nhwc_avg_generic_depthfirst
+struct a64_s8q_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = a64_s8q_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>;
   a64_s8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_s8q_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index 530ee06..a2487b0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,14 +22,15 @@
  * SOFTWARE.
  */
 
+#if defined(__aarch64__)
+
 #include "pooling.hpp"
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(__aarch64__)
-
 namespace arm_conv {
 namespace pooling {
 
@@ -86,13 +87,13 @@
       f_rescale_value *= 2.0f;
     }
 
-    int64_t large_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
-    if (large_rescale_value == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      large_rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
-    rescale_value = static_cast<int32_t>(large_rescale_value);
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
   // Combine together the rescale value for the requantization and the scaling
@@ -113,17 +114,17 @@
   );
 
   __asm__ __volatile__(
-    "mov x26, #0x0\n"
-    "mov x25, #0x10\n" // cntb _, ALL, #1
-    "mov x24, #0x20\n" // cntb _, ALL, #2
-    "mov x23, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n"  // cntb _, ALL, #1
+    "mov x24, #0x20\n"  // cntb _, ALL, #2
+    "mov x23, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v15.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v14.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
     "movi v11.4s, #0x0\n"
@@ -140,10 +141,10 @@
     "movi v0.4s, #0x0\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "ldr q29, [x21, x25]\n"
     "ldr q28, [x20, x25]\n"
     "ldr q27, [x21, x24]\n"
@@ -153,28 +154,28 @@
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     "saddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "saddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "saddl v21.8h, v29.8b, v28.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "saddl v21.8h, v29.8b, v28.8b\n"
     "saddl2 v20.8h, v29.16b, v28.16b\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "saddl v19.8h, v27.8b, v26.8b\n"
-    "ldr q29, [x21, x25]\n"
     "saddl2 v18.8h, v27.16b, v26.16b\n"
-    "ldr q28, [x20, x25]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
     "saddl v17.8h, v25.8b, v24.8b\n"
-    "ldr q27, [x21, x24]\n"
     "saddl2 v16.8h, v25.16b, v24.16b\n"
-    "ldr q26, [x20, x24]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
-    "ldr q25, [x21, x23]\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q24, [x20, x23]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q24, [x20, x23]\n"
     "saddw v11.4s, v11.4s, v21.4h\n"
     "saddw2 v10.4s, v10.4s, v21.8h\n"
     "saddw v9.4s, v9.4s, v20.4h\n"
@@ -218,17 +219,17 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "sxtl v23.8h, v31.8b\n"
-    "ldr q29, [x21, x25]\n"
     "sxtl2 v22.8h, v31.16b\n"
+    "ldr q29, [x21, x25]\n"
     "ldr q27, [x21, x24]\n"
-    "ldr q25, [x21, x23]\n"
     "sxtl v21.8h, v29.8b\n"
     "sxtl2 v20.8h, v29.16b\n"
+    "ldr q25, [x21, x23]\n"
     "sxtl v19.8h, v27.8b\n"
     "sxtl2 v18.8h, v27.16b\n"
+    "subs x20, x20, #0x1\n"
     "sxtl v17.8h, v25.8b\n"
     "sxtl2 v16.8h, v25.16b\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
@@ -249,108 +250,108 @@
     "saddw2 v0.4s, v0.4s, v16.8h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "movi v20.4s, #0x7f\n"
-    "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
-    "sub %x[n_channels], %x[n_channels], #0x40\n"
     "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
     "srshl v15.4s, v15.4s, v18.4s\n"
-    "ld1r { v17.4s }, [%x[right_shift]]\n"
-    "not v16.16b, v20.16b\n"
     "srshl v14.4s, v14.4s, v18.4s\n"
-    "cmp %x[n_channels], #0x40\n"
     "srshl v13.4s, v13.4s, v18.4s\n"
     "srshl v12.4s, v12.4s, v18.4s\n"
+    "ld1r { v16.4s }, [%x[right_shift]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
     "srshl v11.4s, v11.4s, v18.4s\n"
-    "sqrdmulh v15.4s, v15.4s, v19.4s\n"
-    "sqrdmulh v14.4s, v14.4s, v19.4s\n"
-    "sqrdmulh v13.4s, v13.4s, v19.4s\n"
-    "sqrdmulh v12.4s, v12.4s, v19.4s\n"
-    "srshl v15.4s, v15.4s, v17.4s\n"
-    "srshl v14.4s, v14.4s, v17.4s\n"
-    "srshl v13.4s, v13.4s, v17.4s\n"
-    "srshl v12.4s, v12.4s, v17.4s\n"
-    "sqrdmulh v11.4s, v11.4s, v19.4s\n"
     "srshl v10.4s, v10.4s, v18.4s\n"
+    "cmp %x[n_channels], #0x40\n"
     "srshl v9.4s, v9.4s, v18.4s\n"
     "srshl v8.4s, v8.4s, v18.4s\n"
-    "srshl v11.4s, v11.4s, v17.4s\n"
-    "sqrdmulh v10.4s, v10.4s, v19.4s\n"
-    "sqrdmulh v9.4s, v9.4s, v19.4s\n"
-    "sqrdmulh v8.4s, v8.4s, v19.4s\n"
     "srshl v7.4s, v7.4s, v18.4s\n"
-    "srshl v10.4s, v10.4s, v17.4s\n"
-    "srshl v9.4s, v9.4s, v17.4s\n"
-    "srshl v8.4s, v8.4s, v17.4s\n"
-    "sqrdmulh v7.4s, v7.4s, v19.4s\n"
     "srshl v6.4s, v6.4s, v18.4s\n"
     "srshl v5.4s, v5.4s, v18.4s\n"
     "srshl v4.4s, v4.4s, v18.4s\n"
-    "srshl v7.4s, v7.4s, v17.4s\n"
-    "sqrdmulh v6.4s, v6.4s, v19.4s\n"
-    "sqrdmulh v5.4s, v5.4s, v19.4s\n"
-    "sqrdmulh v4.4s, v4.4s, v19.4s\n"
     "srshl v3.4s, v3.4s, v18.4s\n"
-    "srshl v6.4s, v6.4s, v17.4s\n"
-    "srshl v5.4s, v5.4s, v17.4s\n"
-    "srshl v4.4s, v4.4s, v17.4s\n"
-    "sqrdmulh v3.4s, v3.4s, v19.4s\n"
     "srshl v2.4s, v2.4s, v18.4s\n"
     "srshl v1.4s, v1.4s, v18.4s\n"
     "srshl v0.4s, v0.4s, v18.4s\n"
-    "srshl v3.4s, v3.4s, v17.4s\n"
-    "sqrdmulh v2.4s, v2.4s, v19.4s\n"
-    "sqrdmulh v1.4s, v1.4s, v19.4s\n"
-    "sqrdmulh v0.4s, v0.4s, v19.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v17.4s\n"
+    "sqrdmulh v11.4s, v11.4s, v17.4s\n"
+    "sqrdmulh v10.4s, v10.4s, v17.4s\n"
+    "sqrdmulh v9.4s, v9.4s, v17.4s\n"
+    "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+    "sqrdmulh v7.4s, v7.4s, v17.4s\n"
+    "sqrdmulh v6.4s, v6.4s, v17.4s\n"
+    "sqrdmulh v5.4s, v5.4s, v17.4s\n"
+    "sqrdmulh v4.4s, v4.4s, v17.4s\n"
+    "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+    "sqrdmulh v2.4s, v2.4s, v17.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v17.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v17.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "srshl v11.4s, v11.4s, v16.4s\n"
+    "srshl v10.4s, v10.4s, v16.4s\n"
+    "srshl v9.4s, v9.4s, v16.4s\n"
+    "srshl v8.4s, v8.4s, v16.4s\n"
+    "srshl v7.4s, v7.4s, v16.4s\n"
+    "srshl v6.4s, v6.4s, v16.4s\n"
+    "srshl v5.4s, v5.4s, v16.4s\n"
+    "srshl v4.4s, v4.4s, v16.4s\n"
+    "srshl v3.4s, v3.4s, v16.4s\n"
+    "srshl v2.4s, v2.4s, v16.4s\n"
+    "srshl v1.4s, v1.4s, v16.4s\n"
+    "srshl v0.4s, v0.4s, v16.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v15.4s, v15.4s, v16.4s\n"
-    "srshl v2.4s, v2.4s, v17.4s\n"
-    "srshl v1.4s, v1.4s, v17.4s\n"
-    "srshl v0.4s, v0.4s, v17.4s\n"
-    "smin v15.4s, v15.4s, v20.4s\n"
     "smax v14.4s, v14.4s, v16.4s\n"
     "smax v13.4s, v13.4s, v16.4s\n"
     "smax v12.4s, v12.4s, v16.4s\n"
-    "smin v14.4s, v14.4s, v20.4s\n"
-    "smin v13.4s, v13.4s, v20.4s\n"
-    "smin v12.4s, v12.4s, v20.4s\n"
     "smax v11.4s, v11.4s, v16.4s\n"
     "smax v10.4s, v10.4s, v16.4s\n"
     "smax v9.4s, v9.4s, v16.4s\n"
-    "smin v11.4s, v11.4s, v20.4s\n"
-    "smin v10.4s, v10.4s, v20.4s\n"
-    "smin v9.4s, v9.4s, v20.4s\n"
     "smax v8.4s, v8.4s, v16.4s\n"
     "smax v7.4s, v7.4s, v16.4s\n"
     "smax v6.4s, v6.4s, v16.4s\n"
-    "smin v8.4s, v8.4s, v20.4s\n"
-    "smin v7.4s, v7.4s, v20.4s\n"
-    "smin v6.4s, v6.4s, v20.4s\n"
     "smax v5.4s, v5.4s, v16.4s\n"
     "smax v4.4s, v4.4s, v16.4s\n"
     "smax v3.4s, v3.4s, v16.4s\n"
-    "smin v5.4s, v5.4s, v20.4s\n"
-    "smin v4.4s, v4.4s, v20.4s\n"
-    "smin v3.4s, v3.4s, v20.4s\n"
     "smax v2.4s, v2.4s, v16.4s\n"
     "smax v1.4s, v1.4s, v16.4s\n"
     "smax v0.4s, v0.4s, v16.4s\n"
-    "smin v2.4s, v2.4s, v20.4s\n"
-    "smin v1.4s, v1.4s, v20.4s\n"
-    "smin v0.4s, v0.4s, v20.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
+    "smin v11.4s, v11.4s, v17.4s\n"
+    "smin v10.4s, v10.4s, v17.4s\n"
+    "smin v9.4s, v9.4s, v17.4s\n"
+    "smin v8.4s, v8.4s, v17.4s\n"
+    "smin v7.4s, v7.4s, v17.4s\n"
+    "smin v6.4s, v6.4s, v17.4s\n"
+    "smin v5.4s, v5.4s, v17.4s\n"
+    "smin v4.4s, v4.4s, v17.4s\n"
+    "smin v3.4s, v3.4s, v17.4s\n"
+    "smin v2.4s, v2.4s, v17.4s\n"
+    "smin v1.4s, v1.4s, v17.4s\n"
+    "smin v0.4s, v0.4s, v17.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v22.16b, v11.16b, v10.16b\n"
-    "uzp1 v21.16b, v9.16b, v8.16b\n"
-    "uzp1 v20.16b, v7.16b, v6.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
     "uzp1 v17.16b, v5.16b, v4.16b\n"
-    "uzp1 v19.16b, v3.16b, v2.16b\n"
-    "uzp1 v18.16b, v1.16b, v0.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
     "str q16, [%x[outptr], x26]\n"
-    "uzp1 v16.16b, v22.16b, v21.16b\n"
     "add x26, x26, #0x40\n"
-    "uzp1 v17.16b, v20.16b, v17.16b\n"
-    "str q16, [%x[outptr], x25]\n"
-    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
     "add x25, x25, #0x40\n"
     "str q17, [%x[outptr], x24]\n"
     "add x24, x24, #0x40\n"
@@ -362,31 +363,31 @@
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v15.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v14.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     "saddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "saddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "saddw v15.4s, v15.4s, v23.4h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q30, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     "saddl v23.8h, v31.8b, v30.8b\n"
@@ -400,43 +401,43 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "sxtl v23.8h, v31.8b\n"
     "sxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "movi v20.4s, #0x7f\n"
-    "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
-    "sub %x[n_channels], %x[n_channels], #0x10\n"
     "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
     "srshl v15.4s, v15.4s, v18.4s\n"
-    "ld1r { v17.4s }, [%x[right_shift]]\n"
-    "not v16.16b, v20.16b\n"
     "srshl v14.4s, v14.4s, v18.4s\n"
-    "cmp %x[n_channels], #0x10\n"
     "srshl v13.4s, v13.4s, v18.4s\n"
     "srshl v12.4s, v12.4s, v18.4s\n"
-    "sqrdmulh v15.4s, v15.4s, v19.4s\n"
-    "sqrdmulh v14.4s, v14.4s, v19.4s\n"
-    "sqrdmulh v13.4s, v13.4s, v19.4s\n"
-    "sqrdmulh v12.4s, v12.4s, v19.4s\n"
-    "srshl v15.4s, v15.4s, v17.4s\n"
-    "srshl v14.4s, v14.4s, v17.4s\n"
-    "srshl v13.4s, v13.4s, v17.4s\n"
-    "srshl v12.4s, v12.4s, v17.4s\n"
+    "ld1r { v16.4s }, [%x[right_shift]]\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "sqrdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+    "cmp %x[n_channels], #0x10\n"
+    "sqrdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v17.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v15.4s, v15.4s, v16.4s\n"
     "smax v14.4s, v14.4s, v16.4s\n"
     "smax v13.4s, v13.4s, v16.4s\n"
     "smax v12.4s, v12.4s, v16.4s\n"
-    "smin v15.4s, v15.4s, v20.4s\n"
-    "smin v14.4s, v14.4s, v20.4s\n"
-    "smin v13.4s, v13.4s, v20.4s\n"
-    "smin v12.4s, v12.4s, v20.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
@@ -445,21 +446,21 @@
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v15.4s, #0x0\n"
-    "add %x[outptr], %x[outptr], x26\n"
-    "movi v14.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v13.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x22, 24f\n"
     "15:"  // Oddments: 2 inputs loop
-    "movi v31.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x0]\n"
     "add x19, x19, #0x10\n"
-    "movi v30.16b, #0x0\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "add x20, x20, x26\n"
+    "movi v30.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
     "ldr d31, [x21], #0x8\n"
     "ldr d30, [x20], #0x8\n"
@@ -521,8 +522,8 @@
     "ldr b30, [x20], #0x1\n"
     "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
     "saddl v23.8h, v31.8b, v30.8b\n"
-    "subs x22, x22, #0x1\n"
     "saddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
@@ -532,9 +533,9 @@
     "ands x20, %x[n_valid_cells], #0x1\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v31.16b, #0x0\n"
     "ldr x21, [x19], #0x8\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
     "ldr d31, [x21], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
@@ -581,39 +582,39 @@
     "ldr b31, [x21], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
     "sxtl v23.8h, v31.8b\n"
-    "subs x20, x20, #0x1\n"
     "sxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "saddw v15.4s, v15.4s, v23.4h\n"
     "saddw2 v14.4s, v14.4s, v23.8h\n"
     "saddw v13.4s, v13.4s, v22.4h\n"
     "saddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
-    "movi v20.4s, #0x7f\n"
-    "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
-    "not v16.16b, v20.16b\n"
     "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
     "srshl v15.4s, v15.4s, v18.4s\n"
-    "ld1r { v17.4s }, [%x[right_shift]]\n"
     "srshl v14.4s, v14.4s, v18.4s\n"
     "srshl v13.4s, v13.4s, v18.4s\n"
     "srshl v12.4s, v12.4s, v18.4s\n"
-    "sqrdmulh v15.4s, v15.4s, v19.4s\n"
-    "sqrdmulh v14.4s, v14.4s, v19.4s\n"
-    "sqrdmulh v13.4s, v13.4s, v19.4s\n"
-    "sqrdmulh v12.4s, v12.4s, v19.4s\n"
-    "srshl v15.4s, v15.4s, v17.4s\n"
-    "srshl v14.4s, v14.4s, v17.4s\n"
-    "srshl v13.4s, v13.4s, v17.4s\n"
-    "srshl v12.4s, v12.4s, v17.4s\n"
+    "ld1r { v16.4s }, [%x[right_shift]]\n"
+    "sqrdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v17.4s\n"
+    "movi v17.4s, #0x7f\n"
+    "srshl v15.4s, v15.4s, v16.4s\n"
+    "srshl v14.4s, v14.4s, v16.4s\n"
+    "srshl v13.4s, v13.4s, v16.4s\n"
+    "srshl v12.4s, v12.4s, v16.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v15.4s, v15.4s, v16.4s\n"
     "smax v14.4s, v14.4s, v16.4s\n"
     "smax v13.4s, v13.4s, v16.4s\n"
     "smax v12.4s, v12.4s, v16.4s\n"
-    "smin v15.4s, v15.4s, v20.4s\n"
-    "smin v14.4s, v14.4s, v20.4s\n"
-    "smin v13.4s, v13.4s, v20.4s\n"
-    "smin v12.4s, v12.4s, v20.4s\n"
+    "smin v15.4s, v15.4s, v17.4s\n"
+    "smin v14.4s, v14.4s, v17.4s\n"
+    "smin v13.4s, v13.4s, v17.4s\n"
+    "smin v12.4s, v12.4s, v17.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
@@ -662,9 +663,7 @@
     "tbz %x[n_channels], #0, 42f\n"
     "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [right_shift] "r" (&right_shift)
     : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
@@ -673,5 +672,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
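
Note: besides the register renumbering, this file renames large_rescale_value to long_rescale_value in the requantisation setup. The surrounding logic encodes the averaging scale as a Q31 multiplier plus a shift, bumping the shift when rounding lands exactly on 2^31, which would not fit in int32_t. A self-contained sketch of that idea, assuming the value being encoded is the reciprocal of the pooling window; the normalisation loop is reconstructed from the context lines, so treat it as illustrative rather than the library's exact code:

  #include <cmath>
  #include <cstdint>

  // Encode 1/window_cells as (rescale_value, shift_value) with rescale_value
  // in Q31 format. Mirrors the overflow guard visible in the hunk above.
  static void encode_rescale(uint64_t window_cells, int32_t &rescale_value, int &shift_value)
  {
    float f_rescale_value = 1.0f / static_cast<float>(window_cells);
    shift_value = 0;
    // Normalise into [0.5, 1.0) so the Q31 multiplier keeps full precision.
    while (f_rescale_value < 0.5f)
    {
      shift_value--;
      f_rescale_value *= 2.0f;
    }
    int64_t long_rescale_value =
        static_cast<int64_t>(std::round(f_rescale_value * static_cast<float>(1ll << 31)));
    if (long_rescale_value == (1ll << 31))
    {
      // Rounding hit 2^31 exactly; fold one bit into the shift so the
      // multiplier fits in int32_t.
      shift_value++;
      long_rescale_value >>= 1;
    }
    rescale_value = static_cast<int32_t>(long_rescale_value);
  }
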
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp
index ea7f7f8..68e7a98 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_s8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
 
-struct a64_s8q_nhwc_max_generic_depthfirst
+struct a64_s8q_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = a64_s8q_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>;
   a64_s8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_s8q_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
index a077121..5a6cfb4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,12 +21,11 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#if defined(__aarch64__)
 
 #include "pooling.hpp"
-#include <cstddef>
 #include <cstdint>
-
-#if defined(__aarch64__)
+#include <cstddef>
 
 namespace arm_conv {
 namespace pooling {
@@ -42,30 +41,30 @@
 )
 {
   __asm__ __volatile__(
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v8.16b, #0x80\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v7.16b, #0x80\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v4.16b, #0x80\n"
+    "movi v3.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "movi v6.16b, #0x80\n"
     "movi v5.16b, #0x80\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -78,47 +77,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "smax v23.16b, v3.16b, v2.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "smax v22.16b, v31.16b, v30.16b\n"
-    "ldr q3, [x23, x28]\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
     "smax v18.16b, v29.16b, v28.16b\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "smax v21.16b, v27.16b, v21.16b\n"
-    "ldr q2, [x22, x28]\n"
     "smax v17.16b, v26.16b, v17.16b\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "smax v20.16b, v25.16b, v20.16b\n"
-    "ldr q0, [x20, x28]\n"
     "smax v16.16b, v24.16b, v16.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "ldr q30, [x22, x27]\n"
     "smax v18.16b, v22.16b, v18.16b\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "smax v17.16b, v21.16b, v17.16b\n"
-    "ldr q28, [x20, x27]\n"
     "smax v16.16b, v20.16b, v16.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "smax v4.16b, v4.16b, v19.16b\n"
+    "smax v3.16b, v3.16b, v18.16b\n"
     "ldr q27, [x23, x26]\n"
-    "smax v8.16b, v8.16b, v19.16b\n"
     "ldr q21, [x22, x26]\n"
-    "smax v7.16b, v7.16b, v18.16b\n"
-    "ldr q26, [x21, x26]\n"
     "smax v6.16b, v6.16b, v17.16b\n"
-    "ldr q17, [x20, x26]\n"
     "smax v5.16b, v5.16b, v16.16b\n"
+    "ldr q26, [x21, x26]\n"
+    "ldr q17, [x20, x26]\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "smax v23.16b, v3.16b, v2.16b\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
-    "smax v22.16b, v31.16b, v30.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
+    "smax v22.16b, v30.16b, v22.16b\n"
     "smax v18.16b, v29.16b, v28.16b\n"
     "smax v21.16b, v27.16b, v21.16b\n"
     "smax v17.16b, v26.16b, v17.16b\n"
@@ -128,8 +127,8 @@
     "smax v18.16b, v22.16b, v18.16b\n"
     "smax v17.16b, v21.16b, v17.16b\n"
     "smax v16.16b, v20.16b, v16.16b\n"
-    "smax v8.16b, v8.16b, v19.16b\n"
-    "smax v7.16b, v7.16b, v18.16b\n"
+    "smax v4.16b, v4.16b, v19.16b\n"
+    "smax v3.16b, v3.16b, v18.16b\n"
     "smax v6.16b, v6.16b, v17.16b\n"
     "smax v5.16b, v5.16b, v16.16b\n"
     "4:"  // 4-vectors of channels: After loop
@@ -137,146 +136,146 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "smax v8.16b, v8.16b, v3.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "smax v4.16b, v4.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "smax v7.16b, v7.16b, v31.16b\n"
-    "ldr q25, [x23, x25]\n"
+    "smax v3.16b, v3.16b, v30.16b\n"
     "smax v6.16b, v6.16b, v27.16b\n"
+    "ldr q25, [x23, x25]\n"
     "smax v5.16b, v5.16b, v25.16b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "sxtl v23.8h, v8.8b\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "ld1r { v4.4s }, [x19]\n"
-    "sxtl2 v22.8h, v8.16b\n"
+    "sxtl v23.8h, v4.8b\n"
+    "sxtl2 v22.8h, v4.16b\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    "sxtl v21.8h, v7.8b\n"
+    "ld1r { v4.4s }, [x19]\n"
+    "sxtl v21.8h, v3.8b\n"
+    "sxtl2 v18.8h, v3.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
     "ld1r { v3.4s }, [x19]\n"
+    "sxtl v20.8h, v6.8b\n"
+    "sxtl2 v19.8h, v6.16b\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    "sxtl2 v20.8h, v7.16b\n"
     "ld1r { v2.4s }, [x19]\n"
-    "sub %x[n_channels], %x[n_channels], #0x40\n"
-    "sxtl v19.8h, v6.8b\n"
-    "cmp %x[n_channels], #0x40\n"
-    "sxtl2 v18.8h, v6.16b\n"
     "sxtl v17.8h, v5.8b\n"
     "sxtl2 v16.8h, v5.16b\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "cmp %x[n_channels], #0x40\n"
     "sxtl v1.4s, v23.4h\n"
     "sxtl2 v23.4s, v23.8h\n"
     "sxtl v0.4s, v22.4h\n"
     "sxtl2 v31.4s, v22.8h\n"
     "sxtl v30.4s, v21.4h\n"
     "sxtl2 v22.4s, v21.8h\n"
-    "sxtl v29.4s, v20.4h\n"
+    "sxtl v29.4s, v18.4h\n"
+    "sxtl2 v18.4s, v18.8h\n"
+    "sxtl v28.4s, v20.4h\n"
     "sxtl2 v21.4s, v20.8h\n"
-    "sxtl v28.4s, v19.4h\n"
-    "sxtl2 v20.4s, v19.8h\n"
-    "sxtl v27.4s, v18.4h\n"
-    "sxtl2 v26.4s, v18.8h\n"
+    "sxtl v27.4s, v19.4h\n"
+    "sxtl2 v26.4s, v19.8h\n"
     "sxtl v25.4s, v17.4h\n"
-    "sxtl2 v19.4s, v17.8h\n"
+    "sxtl2 v20.4s, v17.8h\n"
     "sxtl v24.4s, v16.4h\n"
-    "sxtl2 v18.4s, v16.8h\n"
-    "srshl v1.4s, v1.4s, v3.4s\n"
-    "srshl v23.4s, v23.4s, v3.4s\n"
-    "srshl v0.4s, v0.4s, v3.4s\n"
-    "srshl v31.4s, v31.4s, v3.4s\n"
-    "sqrdmulh v1.4s, v1.4s, v4.4s\n"
-    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
-    "sqrdmulh v0.4s, v0.4s, v4.4s\n"
-    "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+    "sxtl2 v19.4s, v16.8h\n"
+    "srshl v1.4s, v1.4s, v4.4s\n"
+    "srshl v23.4s, v23.4s, v4.4s\n"
+    "srshl v0.4s, v0.4s, v4.4s\n"
+    "srshl v31.4s, v31.4s, v4.4s\n"
+    "srshl v30.4s, v30.4s, v4.4s\n"
+    "srshl v22.4s, v22.4s, v4.4s\n"
+    "srshl v29.4s, v29.4s, v4.4s\n"
+    "srshl v18.4s, v18.4s, v4.4s\n"
+    "srshl v28.4s, v28.4s, v4.4s\n"
+    "srshl v21.4s, v21.4s, v4.4s\n"
+    "srshl v27.4s, v27.4s, v4.4s\n"
+    "srshl v26.4s, v26.4s, v4.4s\n"
+    "srshl v25.4s, v25.4s, v4.4s\n"
+    "srshl v20.4s, v20.4s, v4.4s\n"
+    "srshl v24.4s, v24.4s, v4.4s\n"
+    "srshl v19.4s, v19.4s, v4.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v3.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v3.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v3.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v3.4s\n"
+    "sqrdmulh v30.4s, v30.4s, v3.4s\n"
+    "sqrdmulh v22.4s, v22.4s, v3.4s\n"
+    "sqrdmulh v29.4s, v29.4s, v3.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v3.4s\n"
+    "sqrdmulh v28.4s, v28.4s, v3.4s\n"
+    "sqrdmulh v21.4s, v21.4s, v3.4s\n"
+    "sqrdmulh v27.4s, v27.4s, v3.4s\n"
+    "sqrdmulh v26.4s, v26.4s, v3.4s\n"
+    "sqrdmulh v25.4s, v25.4s, v3.4s\n"
+    "sqrdmulh v20.4s, v20.4s, v3.4s\n"
+    "sqrdmulh v24.4s, v24.4s, v3.4s\n"
+    "sqrdmulh v19.4s, v19.4s, v3.4s\n"
+    "movi v17.4s, #0x7f\n"
     "srshl v1.4s, v1.4s, v2.4s\n"
     "srshl v23.4s, v23.4s, v2.4s\n"
     "srshl v0.4s, v0.4s, v2.4s\n"
     "srshl v31.4s, v31.4s, v2.4s\n"
-    "srshl v30.4s, v30.4s, v3.4s\n"
-    "srshl v22.4s, v22.4s, v3.4s\n"
-    "srshl v29.4s, v29.4s, v3.4s\n"
-    "srshl v21.4s, v21.4s, v3.4s\n"
-    "sqrdmulh v30.4s, v30.4s, v4.4s\n"
-    "sqrdmulh v22.4s, v22.4s, v4.4s\n"
-    "sqrdmulh v29.4s, v29.4s, v4.4s\n"
-    "sqrdmulh v21.4s, v21.4s, v4.4s\n"
     "srshl v30.4s, v30.4s, v2.4s\n"
     "srshl v22.4s, v22.4s, v2.4s\n"
     "srshl v29.4s, v29.4s, v2.4s\n"
-    "srshl v21.4s, v21.4s, v2.4s\n"
-    "srshl v28.4s, v28.4s, v3.4s\n"
-    "srshl v20.4s, v20.4s, v3.4s\n"
-    "srshl v27.4s, v27.4s, v3.4s\n"
-    "srshl v26.4s, v26.4s, v3.4s\n"
-    "sqrdmulh v28.4s, v28.4s, v4.4s\n"
-    "sqrdmulh v20.4s, v20.4s, v4.4s\n"
-    "sqrdmulh v27.4s, v27.4s, v4.4s\n"
-    "sqrdmulh v26.4s, v26.4s, v4.4s\n"
+    "srshl v18.4s, v18.4s, v2.4s\n"
     "srshl v28.4s, v28.4s, v2.4s\n"
-    "srshl v20.4s, v20.4s, v2.4s\n"
+    "srshl v21.4s, v21.4s, v2.4s\n"
     "srshl v27.4s, v27.4s, v2.4s\n"
     "srshl v26.4s, v26.4s, v2.4s\n"
-    "srshl v25.4s, v25.4s, v3.4s\n"
-    "srshl v19.4s, v19.4s, v3.4s\n"
-    "srshl v24.4s, v24.4s, v3.4s\n"
-    "srshl v18.4s, v18.4s, v3.4s\n"
-    "sqrdmulh v25.4s, v25.4s, v4.4s\n"
-    "sqrdmulh v19.4s, v19.4s, v4.4s\n"
-    "sqrdmulh v24.4s, v24.4s, v4.4s\n"
-    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
     "srshl v25.4s, v25.4s, v2.4s\n"
-    "srshl v19.4s, v19.4s, v2.4s\n"
+    "srshl v20.4s, v20.4s, v2.4s\n"
     "srshl v24.4s, v24.4s, v2.4s\n"
-    "srshl v18.4s, v18.4s, v2.4s\n"
-    "movi v17.4s, #0x7f\n"
+    "srshl v19.4s, v19.4s, v2.4s\n"
     "not v16.16b, v17.16b\n"
     "smax v1.4s, v1.4s, v16.4s\n"
     "smax v23.4s, v23.4s, v16.4s\n"
     "smax v0.4s, v0.4s, v16.4s\n"
     "smax v31.4s, v31.4s, v16.4s\n"
+    "smax v30.4s, v30.4s, v16.4s\n"
+    "smax v22.4s, v22.4s, v16.4s\n"
+    "smax v29.4s, v29.4s, v16.4s\n"
+    "smax v18.4s, v18.4s, v16.4s\n"
+    "smax v28.4s, v28.4s, v16.4s\n"
+    "smax v21.4s, v21.4s, v16.4s\n"
+    "smax v27.4s, v27.4s, v16.4s\n"
+    "smax v26.4s, v26.4s, v16.4s\n"
+    "smax v25.4s, v25.4s, v16.4s\n"
+    "smax v20.4s, v20.4s, v16.4s\n"
+    "smax v24.4s, v24.4s, v16.4s\n"
+    "smax v19.4s, v19.4s, v16.4s\n"
     "smin v1.4s, v1.4s, v17.4s\n"
     "smin v23.4s, v23.4s, v17.4s\n"
     "smin v0.4s, v0.4s, v17.4s\n"
     "smin v31.4s, v31.4s, v17.4s\n"
-    "smax v30.4s, v30.4s, v16.4s\n"
-    "smax v22.4s, v22.4s, v16.4s\n"
-    "smax v29.4s, v29.4s, v16.4s\n"
     "smin v30.4s, v30.4s, v17.4s\n"
     "smin v22.4s, v22.4s, v17.4s\n"
     "smin v29.4s, v29.4s, v17.4s\n"
-    "smax v21.4s, v21.4s, v16.4s\n"
-    "smax v28.4s, v28.4s, v16.4s\n"
-    "smax v20.4s, v20.4s, v16.4s\n"
-    "smin v21.4s, v21.4s, v17.4s\n"
+    "smin v18.4s, v18.4s, v17.4s\n"
     "smin v28.4s, v28.4s, v17.4s\n"
-    "smin v20.4s, v20.4s, v17.4s\n"
-    "smax v27.4s, v27.4s, v16.4s\n"
-    "smax v26.4s, v26.4s, v16.4s\n"
-    "smax v25.4s, v25.4s, v16.4s\n"
+    "smin v21.4s, v21.4s, v17.4s\n"
     "smin v27.4s, v27.4s, v17.4s\n"
     "smin v26.4s, v26.4s, v17.4s\n"
     "smin v25.4s, v25.4s, v17.4s\n"
-    "smax v19.4s, v19.4s, v16.4s\n"
-    "smax v24.4s, v24.4s, v16.4s\n"
-    "smax v18.4s, v18.4s, v16.4s\n"
-    "smin v19.4s, v19.4s, v17.4s\n"
+    "smin v20.4s, v20.4s, v17.4s\n"
     "smin v24.4s, v24.4s, v17.4s\n"
-    "smin v18.4s, v18.4s, v17.4s\n"
+    "smin v19.4s, v19.4s, v17.4s\n"
     "uzp1 v23.16b, v1.16b, v23.16b\n"
     "uzp1 v16.16b, v0.16b, v31.16b\n"
     "uzp1 v22.16b, v30.16b, v22.16b\n"
-    "uzp1 v21.16b, v29.16b, v21.16b\n"
-    "uzp1 v20.16b, v28.16b, v20.16b\n"
+    "uzp1 v18.16b, v29.16b, v18.16b\n"
+    "uzp1 v21.16b, v28.16b, v21.16b\n"
     "uzp1 v17.16b, v27.16b, v26.16b\n"
-    "uzp1 v19.16b, v25.16b, v19.16b\n"
-    "uzp1 v18.16b, v24.16b, v18.16b\n"
+    "uzp1 v20.16b, v25.16b, v20.16b\n"
+    "uzp1 v19.16b, v24.16b, v19.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
     "str q16, [%x[outptr], x28]\n"
-    "uzp1 v16.16b, v22.16b, v21.16b\n"
     "add x28, x28, #0x40\n"
-    "uzp1 v17.16b, v20.16b, v17.16b\n"
-    "str q16, [%x[outptr], x27]\n"
-    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x27]\n"
     "add x27, x27, #0x40\n"
     "str q17, [%x[outptr], x26]\n"
     "add x26, x26, #0x40\n"
@@ -288,76 +287,76 @@
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v8.16b, #0x80\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v4.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "smax v23.16b, v3.16b, v2.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "smax v8.16b, v8.16b, v19.16b\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "smax v4.16b, v4.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "smax v23.16b, v3.16b, v2.16b\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "smax v8.16b, v8.16b, v19.16b\n"
+    "smax v4.16b, v4.16b, v19.16b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "smax v8.16b, v8.16b, v3.16b\n"
+    "smax v4.16b, v4.16b, v2.16b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "sxtl v23.8h, v8.8b\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "ld1r { v4.4s }, [x19]\n"
-    "sxtl2 v22.8h, v8.16b\n"
+    "sxtl v23.8h, v4.8b\n"
+    "sxtl2 v22.8h, v4.16b\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    "movi v17.4s, #0x7f\n"
-    "ld1r { v3.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1r { v4.4s }, [x19]\n"
     "sxtl v1.4s, v23.4h\n"
-    "ld1r { v2.4s }, [x19]\n"
-    "not v16.16b, v17.16b\n"
     "sxtl2 v23.4s, v23.8h\n"
-    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1r { v3.4s }, [x19]\n"
     "sxtl v0.4s, v22.4h\n"
-    "cmp %x[n_channels], #0x10\n"
     "sxtl2 v31.4s, v22.8h\n"
-    "srshl v1.4s, v1.4s, v3.4s\n"
-    "srshl v23.4s, v23.4s, v3.4s\n"
-    "srshl v0.4s, v0.4s, v3.4s\n"
-    "srshl v31.4s, v31.4s, v3.4s\n"
-    "sqrdmulh v1.4s, v1.4s, v4.4s\n"
-    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
-    "sqrdmulh v0.4s, v0.4s, v4.4s\n"
-    "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "srshl v1.4s, v1.4s, v4.4s\n"
+    "srshl v23.4s, v23.4s, v4.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "cmp %x[n_channels], #0x10\n"
+    "srshl v0.4s, v0.4s, v4.4s\n"
+    "srshl v31.4s, v31.4s, v4.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v3.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v3.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v3.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v3.4s\n"
+    "movi v17.4s, #0x7f\n"
     "srshl v1.4s, v1.4s, v2.4s\n"
     "srshl v23.4s, v23.4s, v2.4s\n"
     "srshl v0.4s, v0.4s, v2.4s\n"
     "srshl v31.4s, v31.4s, v2.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v1.4s, v1.4s, v16.4s\n"
     "smax v23.4s, v23.4s, v16.4s\n"
     "smax v0.4s, v0.4s, v16.4s\n"
@@ -374,201 +373,201 @@
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v8.16b, #0x80\n"
-    "add %x[outptr], %x[outptr], x28\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "movi v4.16b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 24f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #2, 17f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
-    "ld1 { v2.h }[6], [x22], #0x2\n"
-    "ld1 { v1.h }[6], [x21], #0x2\n"
-    "ld1 { v0.h }[6], [x20], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
-    "ld1 { v2.b }[14], [x22], #0x1\n"
-    "ld1 { v1.b }[14], [x21], #0x1\n"
-    "ld1 { v0.b }[14], [x20], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
     "b 23f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
-    "ld1 { v2.b }[12], [x22], #0x1\n"
-    "ld1 { v1.b }[12], [x21], #0x1\n"
-    "ld1 { v0.b }[12], [x20], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
     "b 23f\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 18f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
-    "ld1 { v2.h }[4], [x22], #0x2\n"
-    "ld1 { v1.h }[4], [x21], #0x2\n"
-    "ld1 { v0.h }[4], [x20], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
-    "ld1 { v2.b }[10], [x22], #0x1\n"
-    "ld1 { v1.b }[10], [x21], #0x1\n"
-    "ld1 { v0.b }[10], [x20], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
     "b 23f\n"
     "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
-    "ld1 { v2.b }[8], [x22], #0x1\n"
-    "ld1 { v1.b }[8], [x21], #0x1\n"
-    "ld1 { v0.b }[8], [x20], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
     "b 23f\n"
     "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 21f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "tbz %x[n_channels], #1, 20f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
-    "ld1 { v2.h }[2], [x22], #0x2\n"
-    "ld1 { v1.h }[2], [x21], #0x2\n"
-    "ld1 { v0.h }[2], [x20], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
-    "ld1 { v2.b }[6], [x22], #0x1\n"
-    "ld1 { v1.b }[6], [x21], #0x1\n"
-    "ld1 { v0.b }[6], [x20], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
     "b 23f\n"
     "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
-    "ld1 { v2.b }[4], [x22], #0x1\n"
-    "ld1 { v1.b }[4], [x21], #0x1\n"
-    "ld1 { v0.b }[4], [x20], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
     "b 23f\n"
     "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 22f\n"
-    "ldr h3, [x23], #0x2\n"
-    "ldr h2, [x22], #0x2\n"
-    "ldr h1, [x21], #0x2\n"
-    "ldr h0, [x20], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
-    "ld1 { v2.b }[2], [x22], #0x1\n"
-    "ld1 { v1.b }[2], [x21], #0x1\n"
-    "ld1 { v0.b }[2], [x20], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
     "b 23f\n"
     "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ldr b3, [x23], #0x1\n"
-    "ldr b2, [x22], #0x1\n"
-    "ldr b1, [x21], #0x1\n"
-    "ldr b0, [x20], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
     "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
-    "smax v23.16b, v3.16b, v2.16b\n"
+    "smax v23.16b, v2.16b, v1.16b\n"
+    "smax v19.16b, v0.16b, v31.16b\n"
     "subs x24, x24, #0x1\n"
-    "smax v19.16b, v1.16b, v0.16b\n"
     "smax v19.16b, v23.16b, v19.16b\n"
-    "smax v8.16b, v8.16b, v19.16b\n"
+    "smax v4.16b, v4.16b, v19.16b\n"
     "bgt 15b\n"
     "24:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "tbz %x[n_channels], #1, 26f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
     "b 33f\n"
     "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
     "b 33f\n"
     "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 28f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
     "b 33f\n"
     "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
     "b 33f\n"
     "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 31f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "tbz %x[n_channels], #1, 30f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
     "b 33f\n"
     "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
     "b 33f\n"
     "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 32f\n"
-    "ldr h3, [x23], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
     "b 33f\n"
     "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ldr b3, [x23], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
-    "smax v8.16b, v8.16b, v3.16b\n"
     "subs x20, x20, #0x1\n"
+    "smax v4.16b, v4.16b, v2.16b\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
-    "sxtl v23.8h, v8.8b\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "ld1r { v4.4s }, [x19]\n"
-    "sxtl2 v22.8h, v8.16b\n"
+    "sxtl v23.8h, v4.8b\n"
+    "sxtl2 v22.8h, v4.16b\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    "movi v17.4s, #0x7f\n"
-    "ld1r { v3.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1r { v4.4s }, [x19]\n"
     "sxtl v1.4s, v23.4h\n"
-    "ld1r { v2.4s }, [x19]\n"
-    "not v16.16b, v17.16b\n"
     "sxtl2 v23.4s, v23.8h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1r { v3.4s }, [x19]\n"
     "sxtl v0.4s, v22.4h\n"
     "sxtl2 v31.4s, v22.8h\n"
-    "srshl v1.4s, v1.4s, v3.4s\n"
-    "srshl v23.4s, v23.4s, v3.4s\n"
-    "srshl v0.4s, v0.4s, v3.4s\n"
-    "srshl v31.4s, v31.4s, v3.4s\n"
-    "sqrdmulh v1.4s, v1.4s, v4.4s\n"
-    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
-    "sqrdmulh v0.4s, v0.4s, v4.4s\n"
-    "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "srshl v1.4s, v1.4s, v4.4s\n"
+    "srshl v23.4s, v23.4s, v4.4s\n"
+    "srshl v0.4s, v0.4s, v4.4s\n"
+    "srshl v31.4s, v31.4s, v4.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v3.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v3.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v3.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v3.4s\n"
+    "movi v17.4s, #0x7f\n"
     "srshl v1.4s, v1.4s, v2.4s\n"
     "srshl v23.4s, v23.4s, v2.4s\n"
     "srshl v0.4s, v0.4s, v2.4s\n"
     "srshl v31.4s, v31.4s, v2.4s\n"
+    "not v16.16b, v17.16b\n"
     "smax v1.4s, v1.4s, v16.4s\n"
     "smax v23.4s, v23.4s, v16.4s\n"
     "smax v0.4s, v0.4s, v16.4s\n"
@@ -625,16 +624,13 @@
     "tbz %x[n_channels], #0, 42f\n"
     "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
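
Note: the requantisation tail reordered above (sxtl, srshl by the per-layer left shift, sqrdmulh by the per-layer multiplier, srshl by the per-layer right shift, then clamping against 0x7f and its bitwise NOT before the uzp1 narrowing stores) is easier to follow as scalar C++. The sketch below is illustration only and is not part of the patch; it models the usual NEON semantics of SQRDMULH and SRSHL, and the parameter names mirror the Requantize32 fields referenced in the asm operand list.

// Scalar sketch of the per-layer requantisation path above; illustration only.
// left_shift, mul and right_shift correspond to the Requantize32 fields
// per_layer_left_shift, per_layer_mul and per_layer_right_shift.
#include <algorithm>
#include <cstdint>

// Models SQRDMULH.4S: saturating rounding doubling multiply, returning the high half.
static int32_t sqrdmulh_model(int32_t a, int32_t b)
{
  if (a == INT32_MIN && b == INT32_MIN)
  {
    return INT32_MAX;  // the only saturating case
  }
  const int64_t prod = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  return static_cast<int32_t>((prod + (int64_t{1} << 30)) >> 31);
}

// Models SRSHL.4S: rounding shift left; a negative shift is a rounding right shift.
static int32_t srshl_model(int32_t x, int32_t shift)
{
  if (shift >= 0)
  {
    return static_cast<int32_t>(static_cast<int64_t>(x) << shift);
  }
  const int32_t s = -shift;
  return static_cast<int32_t>((static_cast<int64_t>(x) + (int64_t{1} << (s - 1))) >> s);
}

static int8_t requantize_per_layer(int8_t max_value, int32_t left_shift, int32_t mul, int32_t right_shift)
{
  int32_t x = max_value;                 // sxtl / sxtl2: widen the pooled int8 maximum
  x = srshl_model(x, left_shift);        // srshl by per_layer_left_shift
  x = sqrdmulh_model(x, mul);            // sqrdmulh by per_layer_mul
  x = srshl_model(x, right_shift);       // srshl by per_layer_right_shift
  x = std::max(-128, std::min(127, x));  // smax with NOT(0x7f) == -128, smin with 0x7f
  return static_cast<int8_t>(x);         // uzp1 narrowing back to int8
}

The clamp against 0x7f and its bitwise NOT keeps each lane in the signed 8-bit range before the uzp1 pairs pack four 32-bit vectors back into one byte vector per store.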
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp
index 2309524..9781859 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_u8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
 
-struct a64_u8_nhwc_avg_generic_depthfirst
+struct a64_u8_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = a64_u8_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t>;
   a64_u8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_u8_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
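
Note: the header changes in this patch replace each kernel struct's ad-hoc operand/return typedefs, kern_type pointer and pooling_type() accessor with a shared parent such as IGenericDepthfirstStrategy<TInput, TOutput>, from which the struct only overrides get_kernel(). The fragment below is a simplified sketch of the shape of that interface and of how a driver could dispatch through it; it is an assumption for illustration, not the library's actual declaration (the class name here is deliberately different).

// Simplified sketch of a generic depthfirst strategy interface; the kernel
// signature follows a64_u8_nhwc_avg_generic_depthfirst_impl above, but this
// is not the library's definition of IGenericDepthfirstStrategy.
#include <cstdint>

template <typename TInput, typename TOutput>
class GenericDepthfirstStrategySketch
{
  public:
  using KernelType = void (*)(uint64_t window_cells, uint64_t n_valid_cells, uint64_t n_channels,
                              const TInput *const *inptrs, TOutput *outptr);

  virtual ~GenericDepthfirstStrategySketch() = default;
  virtual KernelType get_kernel(void) const = 0;
};

// A driver holding a reference to the base class can then invoke whichever
// micro-kernel was selected without knowing its concrete struct:
//   strategy.get_kernel()(window_cells, n_valid_cells, n_channels, inptrs, outptr);

Centralising the function-pointer type in the parent is what lets each per-architecture struct shrink to a constructor plus a one-line get_kernel() override, as seen in the hunks above and below.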
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
index baf23b4..c9fdf76 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,13 +22,14 @@
  * SOFTWARE.
  */
 
+#if defined(__aarch64__)
+
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(__aarch64__)
-
 namespace arm_conv {
 namespace pooling {
 
@@ -84,27 +85,27 @@
       f_rescale_value *= 2.0f;
     }
 
-    int64_t large_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
-    if (large_rescale_value == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      large_rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
-    rescale_value = static_cast<int32_t>(large_rescale_value);
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
   __asm__ __volatile__(
-    "mov x26, #0x0\n"
-    "mov x25, #0x10\n" // cntb _, ALL, #1
-    "mov x24, #0x20\n" // cntb _, ALL, #2
-    "mov x23, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n"  // cntb _, ALL, #1
+    "mov x24, #0x20\n"  // cntb _, ALL, #2
+    "mov x23, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v15.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v14.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
     "movi v11.4s, #0x0\n"
@@ -121,10 +122,10 @@
     "movi v0.4s, #0x0\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "ldr q29, [x21, x25]\n"
     "ldr q28, [x20, x25]\n"
     "ldr q27, [x21, x24]\n"
@@ -134,28 +135,28 @@
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     "uaddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "uaddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "uaddl v21.8h, v29.8b, v28.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "uaddl v21.8h, v29.8b, v28.8b\n"
     "uaddl2 v20.8h, v29.16b, v28.16b\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "uaddl v19.8h, v27.8b, v26.8b\n"
-    "ldr q29, [x21, x25]\n"
     "uaddl2 v18.8h, v27.16b, v26.16b\n"
-    "ldr q28, [x20, x25]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
     "uaddl v17.8h, v25.8b, v24.8b\n"
-    "ldr q27, [x21, x24]\n"
     "uaddl2 v16.8h, v25.16b, v24.16b\n"
-    "ldr q26, [x20, x24]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
-    "ldr q25, [x21, x23]\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q24, [x20, x23]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q24, [x20, x23]\n"
     "uaddw v11.4s, v11.4s, v21.4h\n"
     "uaddw2 v10.4s, v10.4s, v21.8h\n"
     "uaddw v9.4s, v9.4s, v20.4h\n"
@@ -199,17 +200,17 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "uxtl v23.8h, v31.8b\n"
-    "ldr q29, [x21, x25]\n"
     "uxtl2 v22.8h, v31.16b\n"
+    "ldr q29, [x21, x25]\n"
     "ldr q27, [x21, x24]\n"
-    "ldr q25, [x21, x23]\n"
     "uxtl v21.8h, v29.8b\n"
     "uxtl2 v20.8h, v29.16b\n"
+    "ldr q25, [x21, x23]\n"
     "uxtl v19.8h, v27.8b\n"
     "uxtl2 v18.8h, v27.16b\n"
+    "subs x20, x20, #0x1\n"
     "uxtl v17.8h, v25.8b\n"
     "uxtl2 v16.8h, v25.16b\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
@@ -230,91 +231,91 @@
     "uaddw2 v0.4s, v0.4s, v16.8h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "movi v19.4s, #0x0\n"
-    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
-    "sub %x[n_channels], %x[n_channels], #0x40\n"
-    "movi v17.4s, #0xff\n"
+    "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
     "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "sqdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqdmulh v12.4s, v12.4s, v17.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
     "cmp %x[n_channels], #0x40\n"
-    "sqdmulh v15.4s, v15.4s, v18.4s\n"
-    "sqdmulh v14.4s, v14.4s, v18.4s\n"
-    "sqdmulh v13.4s, v13.4s, v18.4s\n"
-    "sqdmulh v12.4s, v12.4s, v18.4s\n"
-    "sqdmulh v11.4s, v11.4s, v18.4s\n"
+    "sqdmulh v11.4s, v11.4s, v17.4s\n"
+    "sqdmulh v10.4s, v10.4s, v17.4s\n"
+    "sqdmulh v9.4s, v9.4s, v17.4s\n"
+    "sqdmulh v8.4s, v8.4s, v17.4s\n"
+    "sqdmulh v7.4s, v7.4s, v17.4s\n"
+    "sqdmulh v6.4s, v6.4s, v17.4s\n"
+    "sqdmulh v5.4s, v5.4s, v17.4s\n"
+    "sqdmulh v4.4s, v4.4s, v17.4s\n"
+    "sqdmulh v3.4s, v3.4s, v17.4s\n"
+    "sqdmulh v2.4s, v2.4s, v17.4s\n"
+    "sqdmulh v1.4s, v1.4s, v17.4s\n"
+    "sqdmulh v0.4s, v0.4s, v17.4s\n"
     "srshl v15.4s, v15.4s, v16.4s\n"
     "srshl v14.4s, v14.4s, v16.4s\n"
     "srshl v13.4s, v13.4s, v16.4s\n"
     "srshl v12.4s, v12.4s, v16.4s\n"
     "srshl v11.4s, v11.4s, v16.4s\n"
-    "sqdmulh v10.4s, v10.4s, v18.4s\n"
-    "sqdmulh v9.4s, v9.4s, v18.4s\n"
-    "sqdmulh v8.4s, v8.4s, v18.4s\n"
-    "sqdmulh v7.4s, v7.4s, v18.4s\n"
     "srshl v10.4s, v10.4s, v16.4s\n"
     "srshl v9.4s, v9.4s, v16.4s\n"
     "srshl v8.4s, v8.4s, v16.4s\n"
     "srshl v7.4s, v7.4s, v16.4s\n"
-    "sqdmulh v6.4s, v6.4s, v18.4s\n"
-    "sqdmulh v5.4s, v5.4s, v18.4s\n"
-    "sqdmulh v4.4s, v4.4s, v18.4s\n"
-    "sqdmulh v3.4s, v3.4s, v18.4s\n"
     "srshl v6.4s, v6.4s, v16.4s\n"
     "srshl v5.4s, v5.4s, v16.4s\n"
     "srshl v4.4s, v4.4s, v16.4s\n"
     "srshl v3.4s, v3.4s, v16.4s\n"
-    "sqdmulh v2.4s, v2.4s, v18.4s\n"
-    "sqdmulh v1.4s, v1.4s, v18.4s\n"
-    "sqdmulh v0.4s, v0.4s, v18.4s\n"
-    "smax v15.4s, v15.4s, v19.4s\n"
     "srshl v2.4s, v2.4s, v16.4s\n"
     "srshl v1.4s, v1.4s, v16.4s\n"
     "srshl v0.4s, v0.4s, v16.4s\n"
-    "smin v15.4s, v15.4s, v17.4s\n"
-    "smax v14.4s, v14.4s, v19.4s\n"
-    "smax v13.4s, v13.4s, v19.4s\n"
-    "smax v12.4s, v12.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v17.4s\n"
-    "smin v13.4s, v13.4s, v17.4s\n"
-    "smin v12.4s, v12.4s, v17.4s\n"
-    "smax v11.4s, v11.4s, v19.4s\n"
-    "smax v10.4s, v10.4s, v19.4s\n"
-    "smax v9.4s, v9.4s, v19.4s\n"
-    "smin v11.4s, v11.4s, v17.4s\n"
-    "smin v10.4s, v10.4s, v17.4s\n"
-    "smin v9.4s, v9.4s, v17.4s\n"
-    "smax v8.4s, v8.4s, v19.4s\n"
-    "smax v7.4s, v7.4s, v19.4s\n"
-    "smax v6.4s, v6.4s, v19.4s\n"
-    "smin v8.4s, v8.4s, v17.4s\n"
-    "smin v7.4s, v7.4s, v17.4s\n"
-    "smin v6.4s, v6.4s, v17.4s\n"
-    "smax v5.4s, v5.4s, v19.4s\n"
-    "smax v4.4s, v4.4s, v19.4s\n"
-    "smax v3.4s, v3.4s, v19.4s\n"
-    "smin v5.4s, v5.4s, v17.4s\n"
-    "smin v4.4s, v4.4s, v17.4s\n"
-    "smin v3.4s, v3.4s, v17.4s\n"
-    "smax v2.4s, v2.4s, v19.4s\n"
-    "smax v1.4s, v1.4s, v19.4s\n"
-    "smax v0.4s, v0.4s, v19.4s\n"
-    "smin v2.4s, v2.4s, v17.4s\n"
-    "smin v1.4s, v1.4s, v17.4s\n"
-    "smin v0.4s, v0.4s, v17.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smax v11.4s, v11.4s, v16.4s\n"
+    "smax v10.4s, v10.4s, v16.4s\n"
+    "smax v9.4s, v9.4s, v16.4s\n"
+    "smax v8.4s, v8.4s, v16.4s\n"
+    "smax v7.4s, v7.4s, v16.4s\n"
+    "smax v6.4s, v6.4s, v16.4s\n"
+    "smax v5.4s, v5.4s, v16.4s\n"
+    "smax v4.4s, v4.4s, v16.4s\n"
+    "smax v3.4s, v3.4s, v16.4s\n"
+    "smax v2.4s, v2.4s, v16.4s\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v15.4s, v15.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v16.4s\n"
+    "smin v13.4s, v13.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v16.4s\n"
+    "smin v11.4s, v11.4s, v16.4s\n"
+    "smin v10.4s, v10.4s, v16.4s\n"
+    "smin v9.4s, v9.4s, v16.4s\n"
+    "smin v8.4s, v8.4s, v16.4s\n"
+    "smin v7.4s, v7.4s, v16.4s\n"
+    "smin v6.4s, v6.4s, v16.4s\n"
+    "smin v5.4s, v5.4s, v16.4s\n"
+    "smin v4.4s, v4.4s, v16.4s\n"
+    "smin v3.4s, v3.4s, v16.4s\n"
+    "smin v2.4s, v2.4s, v16.4s\n"
+    "smin v1.4s, v1.4s, v16.4s\n"
+    "smin v0.4s, v0.4s, v16.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v22.16b, v11.16b, v10.16b\n"
-    "uzp1 v21.16b, v9.16b, v8.16b\n"
-    "uzp1 v20.16b, v7.16b, v6.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
     "uzp1 v17.16b, v5.16b, v4.16b\n"
-    "uzp1 v19.16b, v3.16b, v2.16b\n"
-    "uzp1 v18.16b, v1.16b, v0.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
     "str q16, [%x[outptr], x26]\n"
-    "uzp1 v16.16b, v22.16b, v21.16b\n"
     "add x26, x26, #0x40\n"
-    "uzp1 v17.16b, v20.16b, v17.16b\n"
-    "str q16, [%x[outptr], x25]\n"
-    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
     "add x25, x25, #0x40\n"
     "str q17, [%x[outptr], x24]\n"
     "add x24, x24, #0x40\n"
@@ -326,31 +327,31 @@
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v15.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v14.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     "uaddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "uaddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q30, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     "uaddl v23.8h, v31.8b, v30.8b\n"
@@ -364,38 +365,38 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "uxtl v23.8h, v31.8b\n"
     "uxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "movi v19.4s, #0x0\n"
-    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
-    "sub %x[n_channels], %x[n_channels], #0x10\n"
-    "movi v17.4s, #0xff\n"
+    "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
     "ld1r { v16.4s }, [%x[shift_ptr]]\n"
+    "sqdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqdmulh v12.4s, v12.4s, v17.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
     "cmp %x[n_channels], #0x10\n"
-    "sqdmulh v15.4s, v15.4s, v18.4s\n"
-    "sqdmulh v14.4s, v14.4s, v18.4s\n"
-    "sqdmulh v13.4s, v13.4s, v18.4s\n"
-    "sqdmulh v12.4s, v12.4s, v18.4s\n"
     "srshl v15.4s, v15.4s, v16.4s\n"
     "srshl v14.4s, v14.4s, v16.4s\n"
     "srshl v13.4s, v13.4s, v16.4s\n"
     "srshl v12.4s, v12.4s, v16.4s\n"
-    "smax v15.4s, v15.4s, v19.4s\n"
-    "smax v14.4s, v14.4s, v19.4s\n"
-    "smax v13.4s, v13.4s, v19.4s\n"
-    "smax v12.4s, v12.4s, v19.4s\n"
-    "smin v15.4s, v15.4s, v17.4s\n"
-    "smin v14.4s, v14.4s, v17.4s\n"
-    "smin v13.4s, v13.4s, v17.4s\n"
-    "smin v12.4s, v12.4s, v17.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v15.4s, v15.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v16.4s\n"
+    "smin v13.4s, v13.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v16.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
@@ -404,21 +405,21 @@
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v15.4s, #0x0\n"
-    "add %x[outptr], %x[outptr], x26\n"
-    "movi v14.4s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v13.4s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "movi v15.4s, #0x0\n"
+    "movi v14.4s, #0x0\n"
+    "movi v13.4s, #0x0\n"
     "movi v12.4s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x22, 24f\n"
     "15:"  // Oddments: 2 inputs loop
-    "movi v31.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x0]\n"
     "add x19, x19, #0x10\n"
-    "movi v30.16b, #0x0\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "add x20, x20, x26\n"
+    "movi v30.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
     "ldr d31, [x21], #0x8\n"
     "ldr d30, [x20], #0x8\n"
@@ -480,8 +481,8 @@
     "ldr b30, [x20], #0x1\n"
     "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
     "uaddl v23.8h, v31.8b, v30.8b\n"
-    "subs x22, x22, #0x1\n"
     "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
@@ -491,9 +492,9 @@
     "ands x20, %x[n_valid_cells], #0x1\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v31.16b, #0x0\n"
     "ldr x21, [x19], #0x8\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
     "ldr d31, [x21], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
@@ -540,34 +541,34 @@
     "ldr b31, [x21], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
     "uxtl v23.8h, v31.8b\n"
-    "subs x20, x20, #0x1\n"
     "uxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
-    "movi v19.4s, #0x0\n"
-    "ld1r { v18.4s }, [%x[rescale_ptr]]\n"
-    "movi v17.4s, #0xff\n"
+    "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
     "ld1r { v16.4s }, [%x[shift_ptr]]\n"
-    "sqdmulh v15.4s, v15.4s, v18.4s\n"
-    "sqdmulh v14.4s, v14.4s, v18.4s\n"
-    "sqdmulh v13.4s, v13.4s, v18.4s\n"
-    "sqdmulh v12.4s, v12.4s, v18.4s\n"
+    "sqdmulh v15.4s, v15.4s, v17.4s\n"
+    "sqdmulh v14.4s, v14.4s, v17.4s\n"
+    "sqdmulh v13.4s, v13.4s, v17.4s\n"
+    "sqdmulh v12.4s, v12.4s, v17.4s\n"
     "srshl v15.4s, v15.4s, v16.4s\n"
     "srshl v14.4s, v14.4s, v16.4s\n"
     "srshl v13.4s, v13.4s, v16.4s\n"
     "srshl v12.4s, v12.4s, v16.4s\n"
-    "smax v15.4s, v15.4s, v19.4s\n"
-    "smax v14.4s, v14.4s, v19.4s\n"
-    "smax v13.4s, v13.4s, v19.4s\n"
-    "smax v12.4s, v12.4s, v19.4s\n"
-    "smin v15.4s, v15.4s, v17.4s\n"
-    "smin v14.4s, v14.4s, v17.4s\n"
-    "smin v13.4s, v13.4s, v17.4s\n"
-    "smin v12.4s, v12.4s, v17.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v15.4s, v15.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v16.4s\n"
+    "smin v13.4s, v13.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v16.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
@@ -616,9 +617,7 @@
     "tbz %x[n_channels], #0, 42f\n"
     "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
     : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
@@ -627,5 +626,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
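
Note: the renamed long_rescale_value above belongs to the fixed-point setup for the average divide: 1/window_cells is expressed as a Q31 multiplier (rescale_value, consumed by the sqdmulh instructions) plus a shift (shift_value, consumed by srshl). The scalar sketch below illustrates that derivation and how one accumulated sum is rescaled; the normalisation loop condition is an assumption inferred from the visible doubling step, and none of this code is part of the patch.

// Scalar sketch of the average-pooling rescale: derive a Q31 multiplier and a
// shift for 1/window_cells, then apply them the way the sqdmulh/srshl pairs
// above do. Illustration only.
#include <cmath>
#include <cstdint>

static void compute_rescale(uint64_t window_cells, int32_t &rescale_value, int32_t &shift_value)
{
  shift_value = 0;
  float f_rescale_value = 1.0f / static_cast<float>(window_cells);

  // Bring the multiplier into [0.5, 1.0) so it uses the full Q31 range (assumed condition).
  while (f_rescale_value < 0.5f)
  {
    shift_value--;
    f_rescale_value *= 2.0f;
  }

  int64_t long_rescale_value =
      static_cast<int64_t>(std::round(f_rescale_value * static_cast<float>(1ll << 31)));
  if (long_rescale_value == (1ll << 31))
  {
    shift_value++;
    long_rescale_value >>= 1;
  }
  rescale_value = static_cast<int32_t>(long_rescale_value);
}

// Equivalent of one "sqdmulh ...; srshl ..." pair on an accumulated sum.
static int32_t rescale_accumulator(int32_t acc, int32_t rescale_value, int32_t shift_value)
{
  // SQDMULH: doubling multiply, high half -> (2*acc*rescale) >> 32 == (acc*rescale) >> 31.
  const int32_t high = static_cast<int32_t>((static_cast<int64_t>(acc) * rescale_value) >> 31);
  // SRSHL: rounding shift; shift_value is normally negative here, i.e. a rounding right shift.
  if (shift_value >= 0)
  {
    return static_cast<int32_t>(static_cast<int64_t>(high) << shift_value);
  }
  const int32_t s = -shift_value;
  return static_cast<int32_t>((static_cast<int64_t>(high) + (int64_t{1} << (s - 1))) >> s);
}

After this rescale the kernel clamps each lane to [0, 255] (smax with 0, smin with 0xff) and narrows with uzp1 before storing, as in the hunks above.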
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 0103de8..556d833 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,27 +29,18 @@
 
 void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst
+struct a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<uint8_t, uint8_t>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
+  using Parent = DepthfirstStrategy<uint8_t, uint8_t>;
 
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
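
Note: the constructor now passes the pool shape, strides and output tile (2, 2, 1, 1, 2, 2) to the DepthfirstStrategy parent instead of exposing them through per-struct accessors. Functionally the kernel still computes, per channel, a 2x2 output tile of stride-1 2x2 maxima over a 3x3 input patch, which is what the umax tree in the matching generic.cpp implements. The scalar reference below illustrates that tile only; it ignores the padding and exclusion arguments of the real kernel and is not the library's code.

// Scalar reference for the tile computed by this kernel: per channel, a 2x2
// max pool with stride 1 over a 3x3 NHWC input patch produces a 2x2 output
// patch. Simplified illustration; padding handling is omitted.
#include <algorithm>
#include <cstdint>

static void max_2x2_s1_out2x2_reference(const uint8_t *const (&in)[3][3],  // channel pointer per input position
                                        uint8_t *const (&out)[2][2],       // channel pointer per output position
                                        unsigned int n_channels)
{
  for (unsigned int c = 0; c < n_channels; c++)
  {
    for (unsigned int oi = 0; oi < 2; oi++)
    {
      for (unsigned int oj = 0; oj < 2; oj++)
      {
        uint8_t m = 0;  // 0 is the identity for unsigned max, matching the movi #0x0 initialisers
        for (unsigned int ki = 0; ki < 2; ki++)
        {
          for (unsigned int kj = 0; kj < 2; kj++)
          {
            m = std::max(m, in[oi + ki][oj + kj][c]);
          }
        }
        out[oi][oj][c] = m;
      }
    }
  }
}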
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 9d379d1..06ded77 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -63,12 +63,12 @@
 
   __asm__ __volatile__(
     "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x14, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x13, #0x0\n"
-    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
     "cmp x15, #0x10\n"
-    "ldp x12, x11, [x20, #0x0]\n"
+    "mov x14, #0x0\n"
+    "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x13, x12, [x20, #0x0]\n"
+    "mov x11, #0x0\n"
     "ldp x10, x9, [x20, #0x10]\n"
     "ldp x28, x27, [x19, #0x0]\n"
     "ldp x26, x25, [x19, #0x10]\n"
@@ -76,12 +76,12 @@
     "ldp x22, x21, [x19, #0x30]\n"
     "ldr x20, [x19, #0x40]\n"
     "blt 3f\n"
-    "ldr q30, [x27, x14]\n"
     "lsr x19, x15, #0x4\n"
-    "ldr q29, [x24, x14]\n"
     "sub x15, x15, x19, LSL #4\n"
-    "ldr q28, [x21, x14]\n"
+    "ldr q30, [x27, x14]\n"
+    "ldr q29, [x24, x14]\n"
     "subs x19, x19, #0x1\n"
+    "ldr q28, [x21, x14]\n"
     "ldr q27, [x25, x14]\n"
     "ldr q26, [x28, x14]\n"
     "ldr q25, [x23, x14]\n"
@@ -92,31 +92,31 @@
     "beq 2f\n"
     "1:"  // Vector: Loop
     "umax v21.16b, v30.16b, v29.16b\n"
-    "ldr q30, [x27, x14]\n"
-    "subs x19, x19, #0x1\n"
     "umax v20.16b, v29.16b, v28.16b\n"
-    "ldr q29, [x24, x14]\n"
+    "subs x19, x19, #0x1\n"
+    "ldr q30, [x27, x14]\n"
     "umax v19.16b, v27.16b, v26.16b\n"
-    "ldr q28, [x21, x14]\n"
     "umax v18.16b, v25.16b, v24.16b\n"
-    "ldr q26, [x28, x14]\n"
+    "ldr q29, [x24, x14]\n"
+    "ldr q28, [x21, x14]\n"
     "umax v17.16b, v23.16b, v27.16b\n"
-    "ldr q27, [x25, x14]\n"
     "umax v16.16b, v25.16b, v22.16b\n"
-    "ldr q25, [x23, x14]\n"
+    "ldr q27, [x25, x14]\n"
+    "ldr q26, [x28, x14]\n"
     "umax v19.16b, v21.16b, v19.16b\n"
+    "umax v18.16b, v18.16b, v21.16b\n"
+    "ldr q25, [x23, x14]\n"
     "ldr q24, [x26, x14]\n"
-    "umax v18.16b, v21.16b, v18.16b\n"
-    "ldr q23, [x22, x14]\n"
     "umax v17.16b, v20.16b, v17.16b\n"
+    "umax v16.16b, v20.16b, v16.16b\n"
+    "ldr q23, [x22, x14]\n"
     "ldr q22, [x20, x14]\n"
     "add x14, x14, #0x10\n"
-    "umax v16.16b, v20.16b, v16.16b\n"
-    "str q19, [x12, x13]\n"
-    "str q18, [x11, x13]\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q19, [x13, x11]\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "bgt 1b\n"
     "2:"  // Vector: Tail
     "umax v21.16b, v30.16b, v29.16b\n"
@@ -126,45 +126,44 @@
     "umax v17.16b, v23.16b, v27.16b\n"
     "umax v16.16b, v25.16b, v22.16b\n"
     "umax v19.16b, v21.16b, v19.16b\n"
-    "str q19, [x12, x13]\n"
-    "umax v18.16b, v21.16b, v18.16b\n"
+    "umax v18.16b, v18.16b, v21.16b\n"
+    "str q19, [x13, x11]\n"
     "umax v17.16b, v20.16b, v17.16b\n"
-    "str q18, [x11, x13]\n"
     "umax v16.16b, v20.16b, v16.16b\n"
-    "str q17, [x10, x13]\n"
-    "str q16, [x9, x13]\n"
-    "add x13, x13, #0x10\n"
+    "str q18, [x12, x11]\n"
+    "str q17, [x10, x11]\n"
+    "str q16, [x9, x11]\n"
+    "add x11, x11, #0x10\n"
     "cbz x15, 4f\n"
     "3:"  // Oddments
     "ldr b30, [x27, x14]\n"
-    "subs x15, x15, #0x1\n"
     "ldr b29, [x24, x14]\n"
     "umax v21.16b, v30.16b, v29.16b\n"
+    "subs x15, x15, #0x1\n"
     "ldr b28, [x21, x14]\n"
     "ldr b27, [x25, x14]\n"
     "umax v20.16b, v29.16b, v28.16b\n"
     "ldr b26, [x28, x14]\n"
     "ldr b25, [x23, x14]\n"
     "umax v19.16b, v27.16b, v26.16b\n"
+    "umax v19.16b, v21.16b, v19.16b\n"
     "ldr b24, [x26, x14]\n"
     "ldr b23, [x22, x14]\n"
-    "umax v19.16b, v21.16b, v19.16b\n"
-    "ldr b22, [x20, x14]\n"
-    "add x14, x14, #0x1\n"
     "umax v18.16b, v25.16b, v24.16b\n"
-    "str b19, [x12, x13]\n"
     "umax v17.16b, v23.16b, v27.16b\n"
+    "ldr b22, [x20, x14]\n"
     "umax v16.16b, v25.16b, v22.16b\n"
-    "umax v18.16b, v21.16b, v18.16b\n"
-    "str b18, [x11, x13]\n"
+    "add x14, x14, #0x1\n"
+    "umax v18.16b, v18.16b, v21.16b\n"
     "umax v17.16b, v20.16b, v17.16b\n"
     "umax v16.16b, v20.16b, v16.16b\n"
-    "str b17, [x10, x13]\n"
-    "str b16, [x9, x13]\n"
-    "add x13, x13, #0x1\n"
+    "str b19, [x13, x11]\n"
+    "str b18, [x12, x11]\n"
+    "str b17, [x10, x11]\n"
+    "str b16, [x9, x11]\n"
+    "add x11, x11, #0x1\n"
     "bgt 3b\n"
     "4:"  // End
-
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
@@ -173,4 +172,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-#endif // defined(__aarch64__)
+#endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp
index 391af31..7d528cc 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_u8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
 
-struct a64_u8_nhwc_max_generic_depthfirst
+struct a64_u8_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = a64_u8_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t>;
   a64_u8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_u8_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
index f9bbfd8..355f217 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,10 +22,11 @@
  * SOFTWARE.
  */
 
-#include <cstdint>
-
 #if defined(__aarch64__)
 
+#include <cstdint>
+#include <cstddef>
+
 namespace arm_conv {
 namespace pooling {
 
@@ -39,30 +40,30 @@
 )
 {
   __asm__ __volatile__(
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v7.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v6.16b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x0\n"
     "movi v5.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v4.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -75,47 +76,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "umax v23.16b, v3.16b, v2.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "umax v22.16b, v31.16b, v30.16b\n"
-    "ldr q3, [x23, x28]\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
     "umax v18.16b, v29.16b, v28.16b\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "umax v21.16b, v27.16b, v21.16b\n"
-    "ldr q2, [x22, x28]\n"
     "umax v17.16b, v26.16b, v17.16b\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "umax v20.16b, v25.16b, v20.16b\n"
-    "ldr q0, [x20, x28]\n"
     "umax v16.16b, v24.16b, v16.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "ldr q30, [x22, x27]\n"
     "umax v18.16b, v22.16b, v18.16b\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "umax v17.16b, v21.16b, v17.16b\n"
-    "ldr q28, [x20, x27]\n"
     "umax v16.16b, v20.16b, v16.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "umax v5.16b, v5.16b, v18.16b\n"
     "ldr q27, [x23, x26]\n"
-    "umax v7.16b, v7.16b, v19.16b\n"
     "ldr q21, [x22, x26]\n"
-    "umax v6.16b, v6.16b, v18.16b\n"
+    "umax v4.16b, v4.16b, v17.16b\n"
+    "umax v3.16b, v3.16b, v16.16b\n"
     "ldr q26, [x21, x26]\n"
-    "umax v5.16b, v5.16b, v17.16b\n"
     "ldr q17, [x20, x26]\n"
-    "umax v4.16b, v4.16b, v16.16b\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "umax v23.16b, v3.16b, v2.16b\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
-    "umax v22.16b, v31.16b, v30.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
     "umax v18.16b, v29.16b, v28.16b\n"
     "umax v21.16b, v27.16b, v21.16b\n"
     "umax v17.16b, v26.16b, v17.16b\n"
@@ -125,315 +126,312 @@
     "umax v18.16b, v22.16b, v18.16b\n"
     "umax v17.16b, v21.16b, v17.16b\n"
     "umax v16.16b, v20.16b, v16.16b\n"
-    "umax v7.16b, v7.16b, v19.16b\n"
-    "umax v6.16b, v6.16b, v18.16b\n"
-    "umax v5.16b, v5.16b, v17.16b\n"
-    "umax v4.16b, v4.16b, v16.16b\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "umax v5.16b, v5.16b, v18.16b\n"
+    "umax v4.16b, v4.16b, v17.16b\n"
+    "umax v3.16b, v3.16b, v16.16b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "umax v7.16b, v7.16b, v3.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "umax v6.16b, v6.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "umax v6.16b, v6.16b, v31.16b\n"
+    "umax v5.16b, v5.16b, v30.16b\n"
+    "umax v4.16b, v4.16b, v27.16b\n"
     "ldr q25, [x23, x25]\n"
-    "umax v5.16b, v5.16b, v27.16b\n"
-    "umax v4.16b, v4.16b, v25.16b\n"
+    "umax v3.16b, v3.16b, v25.16b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x40\n"
-    "str q6, [%x[outptr], x27]\n"
-    "add x27, x27, #0x40\n"
-    "str q5, [%x[outptr], x26]\n"
-    "add x26, x26, #0x40\n"
-    "str q4, [%x[outptr], x25]\n"
-    "add x25, x25, #0x40\n"
     "sub %x[n_channels], %x[n_channels], #0x40\n"
     "cmp %x[n_channels], #0x40\n"
+    "str q6, [%x[outptr], x28]\n"
+    "str q5, [%x[outptr], x27]\n"
+    "add x28, x28, #0x40\n"
+    "add x27, x27, #0x40\n"
+    "str q4, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
+    "str q3, [%x[outptr], x25]\n"
+    "add x25, x25, #0x40\n"
     "bge 1b\n"
     "cbz %x[n_channels], 43f\n"
     "7:"  // Single vector of channels
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v7.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "umax v23.16b, v3.16b, v2.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "umax v7.16b, v7.16b, v19.16b\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "umax v23.16b, v3.16b, v2.16b\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "umax v7.16b, v7.16b, v19.16b\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "umax v7.16b, v7.16b, v3.16b\n"
+    "umax v6.16b, v6.16b, v2.16b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "str q7, [%x[outptr], x28]\n"
-    "add x28, x28, #0x10\n"
     "sub %x[n_channels], %x[n_channels], #0x10\n"
     "cmp %x[n_channels], #0x10\n"
+    "str q6, [%x[outptr], x28]\n"
+    "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v7.16b, #0x0\n"
-    "add %x[outptr], %x[outptr], x28\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "movi v6.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 24f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #2, 17f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
-    "ld1 { v2.h }[6], [x22], #0x2\n"
-    "ld1 { v1.h }[6], [x21], #0x2\n"
-    "ld1 { v0.h }[6], [x20], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
-    "ld1 { v2.b }[14], [x22], #0x1\n"
-    "ld1 { v1.b }[14], [x21], #0x1\n"
-    "ld1 { v0.b }[14], [x20], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
     "b 23f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
-    "ld1 { v2.b }[12], [x22], #0x1\n"
-    "ld1 { v1.b }[12], [x21], #0x1\n"
-    "ld1 { v0.b }[12], [x20], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
     "b 23f\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 18f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
-    "ld1 { v2.h }[4], [x22], #0x2\n"
-    "ld1 { v1.h }[4], [x21], #0x2\n"
-    "ld1 { v0.h }[4], [x20], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
-    "ld1 { v2.b }[10], [x22], #0x1\n"
-    "ld1 { v1.b }[10], [x21], #0x1\n"
-    "ld1 { v0.b }[10], [x20], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
     "b 23f\n"
     "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
-    "ld1 { v2.b }[8], [x22], #0x1\n"
-    "ld1 { v1.b }[8], [x21], #0x1\n"
-    "ld1 { v0.b }[8], [x20], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
     "b 23f\n"
     "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 21f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "tbz %x[n_channels], #1, 20f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
-    "ld1 { v2.h }[2], [x22], #0x2\n"
-    "ld1 { v1.h }[2], [x21], #0x2\n"
-    "ld1 { v0.h }[2], [x20], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
-    "ld1 { v2.b }[6], [x22], #0x1\n"
-    "ld1 { v1.b }[6], [x21], #0x1\n"
-    "ld1 { v0.b }[6], [x20], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
     "b 23f\n"
     "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
-    "ld1 { v2.b }[4], [x22], #0x1\n"
-    "ld1 { v1.b }[4], [x21], #0x1\n"
-    "ld1 { v0.b }[4], [x20], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
     "b 23f\n"
     "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 22f\n"
-    "ldr h3, [x23], #0x2\n"
-    "ldr h2, [x22], #0x2\n"
-    "ldr h1, [x21], #0x2\n"
-    "ldr h0, [x20], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
-    "ld1 { v2.b }[2], [x22], #0x1\n"
-    "ld1 { v1.b }[2], [x21], #0x1\n"
-    "ld1 { v0.b }[2], [x20], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
     "b 23f\n"
     "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ldr b3, [x23], #0x1\n"
-    "ldr b2, [x22], #0x1\n"
-    "ldr b1, [x21], #0x1\n"
-    "ldr b0, [x20], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
     "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
-    "umax v23.16b, v3.16b, v2.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "subs x24, x24, #0x1\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "umax v7.16b, v7.16b, v19.16b\n"
+    "umax v6.16b, v6.16b, v19.16b\n"
     "bgt 15b\n"
     "24:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "tbz %x[n_channels], #1, 26f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
     "b 33f\n"
     "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
     "b 33f\n"
     "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 28f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
     "b 33f\n"
     "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
     "b 33f\n"
     "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 31f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "tbz %x[n_channels], #1, 30f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
     "b 33f\n"
     "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
     "b 33f\n"
     "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 32f\n"
-    "ldr h3, [x23], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
     "b 33f\n"
     "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ldr b3, [x23], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
-    "umax v7.16b, v7.16b, v3.16b\n"
     "subs x20, x20, #0x1\n"
+    "umax v6.16b, v6.16b, v2.16b\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
     "tbz %x[n_channels], #3, 38f\n"
-    "st1 { v7.d }[0], [%x[outptr]], #0x8\n"
+    "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #2, 36f\n"
-    "st1 { v7.s }[2], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #1, 35f\n"
-    "st1 { v7.h }[6], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[14], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[14], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "35:"  // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[12], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[12], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "36:"  // Oddments: Store: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 37f\n"
-    "st1 { v7.h }[4], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[10], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[10], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "37:"  // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[8], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[8], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "38:"  // Oddments: Store: Bit 3: Unset
     "tbz %x[n_channels], #2, 40f\n"
-    "st1 { v7.s }[0], [%x[outptr]], #0x4\n"
+    "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
     "tbz %x[n_channels], #1, 39f\n"
-    "st1 { v7.h }[2], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[6], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[6], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "39:"  // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[4], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[4], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "40:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 41f\n"
-    "st1 { v7.h }[0], [%x[outptr]], #0x2\n"
+    "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[2], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[2], [%x[outptr]], #0x1\n"
     "b 42f\n"
     "41:"  // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 42f\n"
-    "st1 { v7.b }[0], [%x[outptr]], #0x1\n"
+    "st1 { v6.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
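
For reference, the rescheduled oddments tail above still computes the same reduction as before: an elementwise unsigned max over the valid input cells for the channels left over after the vectorised loops. A minimal scalar sketch of that behaviour follows; the names and signature are illustrative, not the library's API.

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Scalar equivalent of the oddments tail: for each remaining channel, take the
// unsigned max across all valid input cells. Zero is the identity element for
// an unsigned max, matching the "movi ..., #0x0" accumulator initialisation above.
static void max_pool_oddments_ref(uint8_t *outptr, const uint8_t *const *inptrs,
                                  size_t n_valid_cells, size_t n_channels)
{
  for (size_t c = 0; c < n_channels; c++)
  {
    uint8_t acc = 0;
    for (size_t i = 0; i < n_valid_cells; i++)
    {
      acc = std::max(acc, inptrs[i][c]);
    }
    outptr[c] = acc;
  }
}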
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp
index d46658f..daf836f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_u8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
 
-struct a64_u8q_nhwc_avg_generic_depthfirst
+struct a64_u8q_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = a64_u8q_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>;
   a64_u8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_u8q_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
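
The quantized avg and max generic strategy structs in this patch now derive from IGenericDepthfirstStrategy and expose their kernel through get_kernel(). The interface itself is not part of these hunks; the following is a hypothetical reconstruction (an assumption, not the library's actual header) of the minimum shape it would need for the overrides above to compile.

#include <cstdint>

// Assumed shape of the parent interface: a KernelType alias matching the
// *_impl free-function signature and a virtual accessor returning it.
template <typename TInput, typename TOutput, typename OutputStage>
class IGenericDepthfirstStrategy
{
  public:
  virtual ~IGenericDepthfirstStrategy() = default;

  // Mirrors the a64_u8q_nhwc_*_generic_depthfirst_impl signatures declared above.
  using KernelType = void (*)(uint64_t window_cells, uint64_t n_valid_cells,
                              uint64_t n_channels, const TInput *const *inptrs,
                              TOutput *outptr, const OutputStage &qp);

  virtual KernelType get_kernel(void) const = 0;
};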
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index 11376e0..d48c4ec 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,15 +22,15 @@
  * SOFTWARE.
  */
 
+#if defined(__aarch64__)
+
 #include "pooling.hpp"
-#include <cstddef>
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(__aarch64__)
-
 namespace arm_conv {
 namespace pooling {
 
@@ -87,13 +87,13 @@
       f_rescale_value *= 2.0f;
     }
 
-    int64_t large_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
-    if (large_rescale_value == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      large_rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
-    rescale_value = static_cast<int32_t>(large_rescale_value);
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
 
@@ -119,20 +119,20 @@
   );
 
   __asm__ __volatile__(
-    "mov x26, #0x0\n"
-    "mov x25, #0x10\n" // cntb _, ALL, #1
-    "mov x24, #0x20\n" // cntb _, ALL, #2
-    "mov x23, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x26, #0x0\n"
+    "mov x25, #0x10\n"  // cntb _, ALL, #1
+    "mov x24, #0x20\n"  // cntb _, ALL, #2
+    "mov x23, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
     "ld1r { v15.4s }, [%x[accumulator_init]]\n"
-    "mov v14.16b, v15.16b\n"
-    "mov x19, %x[inptrs]\n"
-    "mov v13.16b, v15.16b\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov v14.16b, v15.16b\n"
+    "mov v13.16b, v15.16b\n"
     "mov v12.16b, v15.16b\n"
     "mov v11.16b, v15.16b\n"
+    "mov x19, %x[inptrs]\n"
     "mov v10.16b, v15.16b\n"
     "mov v9.16b, v15.16b\n"
     "mov v8.16b, v15.16b\n"
@@ -146,10 +146,10 @@
     "mov v0.16b, v15.16b\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "ldr q29, [x21, x25]\n"
     "ldr q28, [x20, x25]\n"
     "ldr q27, [x21, x24]\n"
@@ -159,28 +159,28 @@
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     "uaddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "uaddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "uaddl v21.8h, v29.8b, v28.8b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "uaddl v21.8h, v29.8b, v28.8b\n"
     "uaddl2 v20.8h, v29.16b, v28.16b\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "uaddl v19.8h, v27.8b, v26.8b\n"
-    "ldr q29, [x21, x25]\n"
     "uaddl2 v18.8h, v27.16b, v26.16b\n"
-    "ldr q28, [x20, x25]\n"
+    "ldr q30, [x20, x26]\n"
+    "ldr q29, [x21, x25]\n"
     "uaddl v17.8h, v25.8b, v24.8b\n"
-    "ldr q27, [x21, x24]\n"
     "uaddl2 v16.8h, v25.16b, v24.16b\n"
-    "ldr q26, [x20, x24]\n"
+    "ldr q28, [x20, x25]\n"
+    "ldr q27, [x21, x24]\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
-    "ldr q25, [x21, x23]\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q24, [x20, x23]\n"
+    "ldr q26, [x20, x24]\n"
+    "ldr q25, [x21, x23]\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q24, [x20, x23]\n"
     "uaddw v11.4s, v11.4s, v21.4h\n"
     "uaddw2 v10.4s, v10.4s, v21.8h\n"
     "uaddw v9.4s, v9.4s, v20.4h\n"
@@ -224,17 +224,17 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "uxtl v23.8h, v31.8b\n"
-    "ldr q29, [x21, x25]\n"
     "uxtl2 v22.8h, v31.16b\n"
+    "ldr q29, [x21, x25]\n"
     "ldr q27, [x21, x24]\n"
-    "ldr q25, [x21, x23]\n"
     "uxtl v21.8h, v29.8b\n"
     "uxtl2 v20.8h, v29.16b\n"
+    "ldr q25, [x21, x23]\n"
     "uxtl v19.8h, v27.8b\n"
     "uxtl2 v18.8h, v27.16b\n"
+    "subs x20, x20, #0x1\n"
     "uxtl v17.8h, v25.8b\n"
     "uxtl2 v16.8h, v25.16b\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
@@ -255,64 +255,62 @@
     "uaddw2 v0.4s, v0.4s, v16.8h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "movi v21.4s, #0x0\n"
-    "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "movi v19.4s, #0xff\n"
-    "ld1r { v18.4s }, [%x[left_shift]]\n"
-    "sub %x[n_channels], %x[n_channels], #0x40\n"
-    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v19.4s }, [%x[left_shift]]\n"
+    "ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
+    "srshl v15.4s, v15.4s, v19.4s\n"
+    "srshl v14.4s, v14.4s, v19.4s\n"
+    "srshl v13.4s, v13.4s, v19.4s\n"
+    "srshl v12.4s, v12.4s, v19.4s\n"
     "ld1r { v17.4s }, [%x[right_shift]]\n"
-    "cmp %x[n_channels], #0x40\n"
-    "srshl v14.4s, v14.4s, v18.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "srshl v11.4s, v11.4s, v19.4s\n"
+    "srshl v10.4s, v10.4s, v19.4s\n"
     "ld1r { v16.4s }, [x19]\n"
-    "srshl v13.4s, v13.4s, v18.4s\n"
-    "srshl v12.4s, v12.4s, v18.4s\n"
-    "srshl v11.4s, v11.4s, v18.4s\n"
-    "sqrdmulh v15.4s, v15.4s, v20.4s\n"
-    "sqrdmulh v14.4s, v14.4s, v20.4s\n"
-    "sqrdmulh v13.4s, v13.4s, v20.4s\n"
-    "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x40\n"
+    "srshl v9.4s, v9.4s, v19.4s\n"
+    "srshl v8.4s, v8.4s, v19.4s\n"
+    "cmp %x[n_channels], #0x40\n"
+    "srshl v7.4s, v7.4s, v19.4s\n"
+    "srshl v6.4s, v6.4s, v19.4s\n"
+    "srshl v5.4s, v5.4s, v19.4s\n"
+    "srshl v4.4s, v4.4s, v19.4s\n"
+    "srshl v3.4s, v3.4s, v19.4s\n"
+    "srshl v2.4s, v2.4s, v19.4s\n"
+    "srshl v1.4s, v1.4s, v19.4s\n"
+    "srshl v0.4s, v0.4s, v19.4s\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "sqrdmulh v11.4s, v11.4s, v18.4s\n"
+    "sqrdmulh v10.4s, v10.4s, v18.4s\n"
+    "sqrdmulh v9.4s, v9.4s, v18.4s\n"
+    "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+    "sqrdmulh v7.4s, v7.4s, v18.4s\n"
+    "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+    "sqrdmulh v5.4s, v5.4s, v18.4s\n"
+    "sqrdmulh v4.4s, v4.4s, v18.4s\n"
+    "sqrdmulh v3.4s, v3.4s, v18.4s\n"
+    "sqrdmulh v2.4s, v2.4s, v18.4s\n"
+    "sqrdmulh v1.4s, v1.4s, v18.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v18.4s\n"
     "srshl v15.4s, v15.4s, v17.4s\n"
     "srshl v14.4s, v14.4s, v17.4s\n"
     "srshl v13.4s, v13.4s, v17.4s\n"
     "srshl v12.4s, v12.4s, v17.4s\n"
-    "sqrdmulh v11.4s, v11.4s, v20.4s\n"
-    "srshl v10.4s, v10.4s, v18.4s\n"
-    "srshl v9.4s, v9.4s, v18.4s\n"
-    "srshl v8.4s, v8.4s, v18.4s\n"
     "srshl v11.4s, v11.4s, v17.4s\n"
-    "sqrdmulh v10.4s, v10.4s, v20.4s\n"
-    "sqrdmulh v9.4s, v9.4s, v20.4s\n"
-    "sqrdmulh v8.4s, v8.4s, v20.4s\n"
-    "srshl v7.4s, v7.4s, v18.4s\n"
     "srshl v10.4s, v10.4s, v17.4s\n"
     "srshl v9.4s, v9.4s, v17.4s\n"
     "srshl v8.4s, v8.4s, v17.4s\n"
-    "sqrdmulh v7.4s, v7.4s, v20.4s\n"
-    "srshl v6.4s, v6.4s, v18.4s\n"
-    "srshl v5.4s, v5.4s, v18.4s\n"
-    "srshl v4.4s, v4.4s, v18.4s\n"
     "srshl v7.4s, v7.4s, v17.4s\n"
-    "sqrdmulh v6.4s, v6.4s, v20.4s\n"
-    "sqrdmulh v5.4s, v5.4s, v20.4s\n"
-    "sqrdmulh v4.4s, v4.4s, v20.4s\n"
-    "srshl v3.4s, v3.4s, v18.4s\n"
     "srshl v6.4s, v6.4s, v17.4s\n"
     "srshl v5.4s, v5.4s, v17.4s\n"
     "srshl v4.4s, v4.4s, v17.4s\n"
-    "sqrdmulh v3.4s, v3.4s, v20.4s\n"
-    "srshl v2.4s, v2.4s, v18.4s\n"
-    "srshl v1.4s, v1.4s, v18.4s\n"
-    "srshl v0.4s, v0.4s, v18.4s\n"
     "srshl v3.4s, v3.4s, v17.4s\n"
-    "sqrdmulh v2.4s, v2.4s, v20.4s\n"
-    "sqrdmulh v1.4s, v1.4s, v20.4s\n"
-    "sqrdmulh v0.4s, v0.4s, v20.4s\n"
-    "add v15.4s, v15.4s, v16.4s\n"
     "srshl v2.4s, v2.4s, v17.4s\n"
     "srshl v1.4s, v1.4s, v17.4s\n"
     "srshl v0.4s, v0.4s, v17.4s\n"
+    "add v15.4s, v15.4s, v16.4s\n"
     "add v14.4s, v14.4s, v16.4s\n"
     "add v13.4s, v13.4s, v16.4s\n"
     "add v12.4s, v12.4s, v16.4s\n"
@@ -328,53 +326,55 @@
     "add v2.4s, v2.4s, v16.4s\n"
     "add v1.4s, v1.4s, v16.4s\n"
     "add v0.4s, v0.4s, v16.4s\n"
-    "smax v15.4s, v15.4s, v21.4s\n"
-    "smax v14.4s, v14.4s, v21.4s\n"
-    "smax v13.4s, v13.4s, v21.4s\n"
-    "smin v15.4s, v15.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v19.4s\n"
-    "smin v13.4s, v13.4s, v19.4s\n"
-    "smax v12.4s, v12.4s, v21.4s\n"
-    "smax v11.4s, v11.4s, v21.4s\n"
-    "smax v10.4s, v10.4s, v21.4s\n"
-    "smin v12.4s, v12.4s, v19.4s\n"
-    "smin v11.4s, v11.4s, v19.4s\n"
-    "smin v10.4s, v10.4s, v19.4s\n"
-    "smax v9.4s, v9.4s, v21.4s\n"
-    "smax v8.4s, v8.4s, v21.4s\n"
-    "smax v7.4s, v7.4s, v21.4s\n"
-    "smin v9.4s, v9.4s, v19.4s\n"
-    "smin v8.4s, v8.4s, v19.4s\n"
-    "smin v7.4s, v7.4s, v19.4s\n"
-    "smax v6.4s, v6.4s, v21.4s\n"
-    "smax v5.4s, v5.4s, v21.4s\n"
-    "smax v4.4s, v4.4s, v21.4s\n"
-    "smin v6.4s, v6.4s, v19.4s\n"
-    "smin v5.4s, v5.4s, v19.4s\n"
-    "smin v4.4s, v4.4s, v19.4s\n"
-    "smax v3.4s, v3.4s, v21.4s\n"
-    "smax v2.4s, v2.4s, v21.4s\n"
-    "smax v1.4s, v1.4s, v21.4s\n"
-    "smin v3.4s, v3.4s, v19.4s\n"
-    "smin v2.4s, v2.4s, v19.4s\n"
-    "smin v1.4s, v1.4s, v19.4s\n"
-    "smax v0.4s, v0.4s, v21.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "smax v11.4s, v11.4s, v16.4s\n"
+    "smax v10.4s, v10.4s, v16.4s\n"
+    "smax v9.4s, v9.4s, v16.4s\n"
+    "smax v8.4s, v8.4s, v16.4s\n"
+    "smax v7.4s, v7.4s, v16.4s\n"
+    "smax v6.4s, v6.4s, v16.4s\n"
+    "smax v5.4s, v5.4s, v16.4s\n"
+    "smax v4.4s, v4.4s, v16.4s\n"
+    "smax v3.4s, v3.4s, v16.4s\n"
+    "smax v2.4s, v2.4s, v16.4s\n"
+    "smax v1.4s, v1.4s, v16.4s\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v15.4s, v15.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v16.4s\n"
+    "smin v13.4s, v13.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v16.4s\n"
+    "smin v11.4s, v11.4s, v16.4s\n"
+    "smin v10.4s, v10.4s, v16.4s\n"
+    "smin v9.4s, v9.4s, v16.4s\n"
+    "smin v8.4s, v8.4s, v16.4s\n"
+    "smin v7.4s, v7.4s, v16.4s\n"
+    "smin v6.4s, v6.4s, v16.4s\n"
+    "smin v5.4s, v5.4s, v16.4s\n"
+    "smin v4.4s, v4.4s, v16.4s\n"
+    "smin v3.4s, v3.4s, v16.4s\n"
+    "smin v2.4s, v2.4s, v16.4s\n"
+    "smin v1.4s, v1.4s, v16.4s\n"
+    "smin v0.4s, v0.4s, v16.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
-    "smin v0.4s, v0.4s, v19.4s\n"
     "uzp1 v22.16b, v11.16b, v10.16b\n"
-    "uzp1 v21.16b, v9.16b, v8.16b\n"
-    "uzp1 v20.16b, v7.16b, v6.16b\n"
+    "uzp1 v18.16b, v9.16b, v8.16b\n"
+    "uzp1 v21.16b, v7.16b, v6.16b\n"
     "uzp1 v17.16b, v5.16b, v4.16b\n"
-    "uzp1 v19.16b, v3.16b, v2.16b\n"
-    "uzp1 v18.16b, v1.16b, v0.16b\n"
+    "uzp1 v20.16b, v3.16b, v2.16b\n"
+    "uzp1 v19.16b, v1.16b, v0.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
+    "uzp1 v18.16b, v22.16b, v18.16b\n"
     "str q16, [%x[outptr], x26]\n"
-    "uzp1 v16.16b, v22.16b, v21.16b\n"
     "add x26, x26, #0x40\n"
-    "uzp1 v17.16b, v20.16b, v17.16b\n"
-    "str q16, [%x[outptr], x25]\n"
-    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "uzp1 v17.16b, v21.16b, v17.16b\n"
+    "uzp1 v16.16b, v20.16b, v19.16b\n"
+    "str q18, [%x[outptr], x25]\n"
     "add x25, x25, #0x40\n"
     "str q17, [%x[outptr], x24]\n"
     "add x24, x24, #0x40\n"
@@ -387,30 +387,30 @@
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
     "ld1r { v15.4s }, [%x[accumulator_init]]\n"
-    "mov v14.16b, v15.16b\n"
-    "mov x19, %x[inptrs]\n"
-    "mov v13.16b, v15.16b\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov v14.16b, v15.16b\n"
+    "mov v13.16b, v15.16b\n"
     "mov v12.16b, v15.16b\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ldr q31, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ldr q30, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
+    "ldr q30, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     "uaddl v23.8h, v31.8b, v30.8b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     "uaddl2 v22.8h, v31.16b, v30.16b\n"
-    "ldr q31, [x21, x26]\n"
-    "uaddw v15.4s, v15.4s, v23.4h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
-    "ldr q30, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ldr q31, [x21, x26]\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
+    "ldr q30, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     "uaddl v23.8h, v31.8b, v30.8b\n"
@@ -424,33 +424,31 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
-    "subs x20, x20, #0x1\n"
     "ldr q31, [x21, x26]\n"
     "uxtl v23.8h, v31.8b\n"
     "uxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "movi v21.4s, #0x0\n"
-    "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "movi v19.4s, #0xff\n"
-    "ld1r { v18.4s }, [%x[left_shift]]\n"
-    "sub %x[n_channels], %x[n_channels], #0x10\n"
-    "srshl v15.4s, v15.4s, v18.4s\n"
+    "ld1r { v19.4s }, [%x[left_shift]]\n"
+    "ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
+    "srshl v15.4s, v15.4s, v19.4s\n"
+    "srshl v14.4s, v14.4s, v19.4s\n"
+    "srshl v13.4s, v13.4s, v19.4s\n"
+    "srshl v12.4s, v12.4s, v19.4s\n"
     "ld1r { v17.4s }, [%x[right_shift]]\n"
-    "cmp %x[n_channels], #0x10\n"
-    "srshl v14.4s, v14.4s, v18.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
     "ld1r { v16.4s }, [x19]\n"
-    "srshl v13.4s, v13.4s, v18.4s\n"
-    "srshl v12.4s, v12.4s, v18.4s\n"
-    "sqrdmulh v15.4s, v15.4s, v20.4s\n"
-    "sqrdmulh v14.4s, v14.4s, v20.4s\n"
-    "sqrdmulh v13.4s, v13.4s, v20.4s\n"
-    "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
+    "cmp %x[n_channels], #0x10\n"
     "srshl v15.4s, v15.4s, v17.4s\n"
     "srshl v14.4s, v14.4s, v17.4s\n"
     "srshl v13.4s, v13.4s, v17.4s\n"
@@ -459,15 +457,17 @@
     "add v14.4s, v14.4s, v16.4s\n"
     "add v13.4s, v13.4s, v16.4s\n"
     "add v12.4s, v12.4s, v16.4s\n"
-    "smax v15.4s, v15.4s, v21.4s\n"
-    "smax v14.4s, v14.4s, v21.4s\n"
-    "smax v13.4s, v13.4s, v21.4s\n"
-    "smin v15.4s, v15.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v19.4s\n"
-    "smin v13.4s, v13.4s, v19.4s\n"
-    "smax v12.4s, v12.4s, v21.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v15.4s, v15.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v16.4s\n"
+    "smin v13.4s, v13.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v16.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
-    "smin v12.4s, v12.4s, v19.4s\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
     "str q16, [%x[outptr], x26]\n"
@@ -476,20 +476,20 @@
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
     "ld1r { v15.4s }, [%x[accumulator_init]]\n"
-    "mov v14.16b, v15.16b\n"
-    "add %x[outptr], %x[outptr], x26\n"
-    "mov v13.16b, v15.16b\n"
-    "mov x19, %x[inptrs]\n"
-    "mov v12.16b, v15.16b\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "add %x[outptr], %x[outptr], x26\n"
+    "mov v14.16b, v15.16b\n"
+    "mov v13.16b, v15.16b\n"
+    "mov v12.16b, v15.16b\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x22, 24f\n"
     "15:"  // Oddments: 2 inputs loop
-    "movi v31.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x0]\n"
     "add x19, x19, #0x10\n"
-    "movi v30.16b, #0x0\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "add x20, x20, x26\n"
+    "movi v30.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
     "ldr d31, [x21], #0x8\n"
     "ldr d30, [x20], #0x8\n"
@@ -551,8 +551,8 @@
     "ldr b30, [x20], #0x1\n"
     "23:"  // Oddments: 2 inputs loop: Load: Bit 3: End
     "uaddl v23.8h, v31.8b, v30.8b\n"
-    "subs x22, x22, #0x1\n"
     "uaddl2 v22.8h, v31.16b, v30.16b\n"
+    "subs x22, x22, #0x1\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
@@ -562,9 +562,9 @@
     "ands x20, %x[n_valid_cells], #0x1\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v31.16b, #0x0\n"
     "ldr x21, [x19], #0x8\n"
     "add x21, x21, x26\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
     "ldr d31, [x21], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
@@ -611,29 +611,27 @@
     "ldr b31, [x21], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
     "uxtl v23.8h, v31.8b\n"
-    "subs x20, x20, #0x1\n"
     "uxtl2 v22.8h, v31.16b\n"
+    "subs x20, x20, #0x1\n"
     "uaddw v15.4s, v15.4s, v23.4h\n"
     "uaddw2 v14.4s, v14.4s, v23.8h\n"
     "uaddw v13.4s, v13.4s, v22.4h\n"
     "uaddw2 v12.4s, v12.4s, v22.8h\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
-    "movi v21.4s, #0x0\n"
-    "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "movi v19.4s, #0xff\n"
-    "ld1r { v18.4s }, [%x[left_shift]]\n"
+    "ld1r { v19.4s }, [%x[left_shift]]\n"
+    "ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
+    "srshl v15.4s, v15.4s, v19.4s\n"
+    "srshl v14.4s, v14.4s, v19.4s\n"
+    "srshl v13.4s, v13.4s, v19.4s\n"
+    "srshl v12.4s, v12.4s, v19.4s\n"
     "ld1r { v17.4s }, [%x[right_shift]]\n"
-    "srshl v15.4s, v15.4s, v18.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+    "sqrdmulh v14.4s, v14.4s, v18.4s\n"
     "ld1r { v16.4s }, [x19]\n"
-    "srshl v14.4s, v14.4s, v18.4s\n"
-    "srshl v13.4s, v13.4s, v18.4s\n"
-    "srshl v12.4s, v12.4s, v18.4s\n"
-    "sqrdmulh v15.4s, v15.4s, v20.4s\n"
-    "sqrdmulh v14.4s, v14.4s, v20.4s\n"
-    "sqrdmulh v13.4s, v13.4s, v20.4s\n"
-    "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+    "sqrdmulh v13.4s, v13.4s, v18.4s\n"
+    "sqrdmulh v12.4s, v12.4s, v18.4s\n"
     "srshl v15.4s, v15.4s, v17.4s\n"
     "srshl v14.4s, v14.4s, v17.4s\n"
     "srshl v13.4s, v13.4s, v17.4s\n"
@@ -642,15 +640,17 @@
     "add v14.4s, v14.4s, v16.4s\n"
     "add v13.4s, v13.4s, v16.4s\n"
     "add v12.4s, v12.4s, v16.4s\n"
-    "smax v15.4s, v15.4s, v21.4s\n"
-    "smax v14.4s, v14.4s, v21.4s\n"
-    "smax v13.4s, v13.4s, v21.4s\n"
-    "smin v15.4s, v15.4s, v19.4s\n"
-    "smin v14.4s, v14.4s, v19.4s\n"
-    "smin v13.4s, v13.4s, v19.4s\n"
-    "smax v12.4s, v12.4s, v21.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v15.4s, v15.4s, v16.4s\n"
+    "smax v14.4s, v14.4s, v16.4s\n"
+    "smax v13.4s, v13.4s, v16.4s\n"
+    "smax v12.4s, v12.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v15.4s, v15.4s, v16.4s\n"
+    "smin v14.4s, v14.4s, v16.4s\n"
+    "smin v13.4s, v13.4s, v16.4s\n"
+    "smin v12.4s, v12.4s, v16.4s\n"
     "uzp1 v23.16b, v15.16b, v14.16b\n"
-    "smin v12.4s, v12.4s, v19.4s\n"
     "uzp1 v16.16b, v13.16b, v12.16b\n"
     "uzp1 v16.16b, v23.16b, v16.16b\n"
     "tbz %x[n_channels], #3, 38f\n"
@@ -698,9 +698,7 @@
     "tbz %x[n_channels], #0, 42f\n"
     "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
     : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
@@ -709,5 +707,4 @@
 
 }  // namespace pooling
 }  // namespace arm_conv
-
 #endif  // defined(__aarch64__)
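
On the host side of this file, the hunk that renames large_rescale_value to long_rescale_value keeps the overflow guard around the fixed-point encoding. A standalone sketch of why that guard is needed (illustrative, not the kernel's host-side code verbatim): when the value being encoded is exactly 1.0f, rounding f_rescale_value * 2^31 lands on 2^31, which does not fit in int32_t, so the shift is bumped and the mantissa halved.

#include <cassert>
#include <cmath>
#include <cstdint>

int main()
{
  // The guard fires when the value to encode is exactly 1.0f and so rounds to 2^31.
  float f_rescale_value = 1.0f;
  int32_t shift_value = 0;

  int64_t long_rescale_value = std::llround(f_rescale_value * static_cast<float>(1ll << 31));
  if (long_rescale_value == (1ll << 31))
  {
    shift_value++;             // compensate in the shift...
    long_rescale_value >>= 1;  // ...and halve the mantissa to 2^30, which fits in int32_t
  }
  const int32_t rescale_value = static_cast<int32_t>(long_rescale_value);

  assert(rescale_value == (1 << 30) && shift_value == 1);
  return 0;
}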
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp
index 1b97b45..fa9600f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void a64_u8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
 
-struct a64_u8q_nhwc_max_generic_depthfirst
+struct a64_u8q_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = a64_u8q_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>;
   a64_u8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return a64_u8q_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
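
The max strategy header gets the same IGenericDepthfirstStrategy treatment as the avg one above. The generic.cpp that follows reorders the requantisation sequence for scheduling but keeps the arithmetic: subtract the input offset from the widened max, apply the per-layer left shift, the fixed-point multiply and the per-layer right shift, add the output offset, then clamp to [0, 255]. A scalar sketch of that path is given below; saturation corner cases of sqrdmulh are omitted and the helper names are illustrative, not the library's reference implementation.

#include <algorithm>
#include <cstdint>

// Rounding shift with srshl-like semantics: left shift for non-negative
// shifts, rounding right shift for negative ones (sketch only).
static int64_t rounding_shift(int64_t v, int32_t shift)
{
  if (shift >= 0) return v << shift;
  return (v + (1ll << (-shift - 1))) >> -shift;
}

static uint8_t requantize_max(uint8_t max_value, int32_t input_offset,
                              int32_t per_layer_left_shift, int32_t per_layer_mul,
                              int32_t per_layer_right_shift, int32_t output_offset)
{
  int64_t v = static_cast<int64_t>(max_value) - input_offset;      // neg + saddw
  v = rounding_shift(v, per_layer_left_shift);                     // srshl (left)
  v = (v * per_layer_mul + (1ll << 30)) >> 31;                     // sqrdmulh (approximate)
  v = rounding_shift(v, per_layer_right_shift);                    // srshl (right)
  v += output_offset;                                              // add
  return static_cast<uint8_t>(std::min<int64_t>(255, std::max<int64_t>(0, v)));  // smax/smin + narrow
}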
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
index 0d196e0..c505074 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,12 +22,12 @@
  * SOFTWARE.
  */
 
-#include "pooling.hpp"
-#include <cstddef>
-#include <cstdint>
-
 #if defined(__aarch64__)
 
+#include "pooling.hpp"
+#include <cstdint>
+#include <cstddef>
+
 namespace arm_conv {
 namespace pooling {
 
@@ -42,30 +42,30 @@
 )
 {
   __asm__ __volatile__(
-    "mov x28, #0x0\n"
-    "mov x27, #0x10\n" // cntb _, ALL, #1
-    "mov x26, #0x20\n" // cntb _, ALL, #2
-    "mov x25, #0x30\n" // cntb _, ALL, #3
     "cmp %x[n_channels], #0x40\n"
+    "mov x28, #0x0\n"
+    "mov x27, #0x10\n"  // cntb _, ALL, #1
+    "mov x26, #0x20\n"  // cntb _, ALL, #2
+    "mov x25, #0x30\n"  // cntb _, ALL, #3
     "blt 7f\n"
     "1:"  // 4-vectors of channels
-    "movi v4.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "movi v8.16b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
     "movi v7.16b, #0x0\n"
+    "movi v3.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "movi v6.16b, #0x0\n"
+    "movi v5.16b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
-    "ldr q31, [x23, x27]\n"
-    "ldr q30, [x22, x27]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "ldr q29, [x21, x27]\n"
     "ldr q28, [x20, x27]\n"
     "ldr q27, [x23, x26]\n"
@@ -78,47 +78,47 @@
     "ldr q16, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "umax v23.16b, v3.16b, v2.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "umax v22.16b, v31.16b, v30.16b\n"
-    "ldr q3, [x23, x28]\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
     "umax v18.16b, v29.16b, v28.16b\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "umax v21.16b, v27.16b, v21.16b\n"
-    "ldr q2, [x22, x28]\n"
     "umax v17.16b, v26.16b, v17.16b\n"
-    "ldr q1, [x21, x28]\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
     "umax v20.16b, v25.16b, v20.16b\n"
-    "ldr q0, [x20, x28]\n"
     "umax v16.16b, v24.16b, v16.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "ldr q30, [x22, x27]\n"
     "umax v18.16b, v22.16b, v18.16b\n"
-    "ldr q29, [x21, x27]\n"
+    "ldr q30, [x23, x27]\n"
+    "ldr q22, [x22, x27]\n"
     "umax v17.16b, v21.16b, v17.16b\n"
-    "ldr q28, [x20, x27]\n"
     "umax v16.16b, v20.16b, v16.16b\n"
+    "ldr q29, [x21, x27]\n"
+    "ldr q28, [x20, x27]\n"
+    "umax v7.16b, v7.16b, v19.16b\n"
+    "umax v3.16b, v3.16b, v18.16b\n"
     "ldr q27, [x23, x26]\n"
-    "umax v4.16b, v4.16b, v19.16b\n"
     "ldr q21, [x22, x26]\n"
-    "umax v8.16b, v8.16b, v18.16b\n"
+    "umax v6.16b, v6.16b, v17.16b\n"
+    "umax v5.16b, v5.16b, v16.16b\n"
     "ldr q26, [x21, x26]\n"
-    "umax v7.16b, v7.16b, v17.16b\n"
     "ldr q17, [x20, x26]\n"
-    "umax v6.16b, v6.16b, v16.16b\n"
     "ldr q25, [x23, x25]\n"
     "ldr q20, [x22, x25]\n"
     "ldr q24, [x21, x25]\n"
     "ldr q16, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "umax v23.16b, v3.16b, v2.16b\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
-    "umax v22.16b, v31.16b, v30.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
+    "umax v22.16b, v30.16b, v22.16b\n"
     "umax v18.16b, v29.16b, v28.16b\n"
     "umax v21.16b, v27.16b, v21.16b\n"
     "umax v17.16b, v26.16b, v17.16b\n"
@@ -128,179 +128,179 @@
     "umax v18.16b, v22.16b, v18.16b\n"
     "umax v17.16b, v21.16b, v17.16b\n"
     "umax v16.16b, v20.16b, v16.16b\n"
-    "umax v4.16b, v4.16b, v19.16b\n"
-    "umax v8.16b, v8.16b, v18.16b\n"
-    "umax v7.16b, v7.16b, v17.16b\n"
-    "umax v6.16b, v6.16b, v16.16b\n"
+    "umax v7.16b, v7.16b, v19.16b\n"
+    "umax v3.16b, v3.16b, v18.16b\n"
+    "umax v6.16b, v6.16b, v17.16b\n"
+    "umax v5.16b, v5.16b, v16.16b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "umax v4.16b, v4.16b, v3.16b\n"
-    "ldr q31, [x23, x27]\n"
+    "umax v7.16b, v7.16b, v2.16b\n"
+    "ldr q30, [x23, x27]\n"
     "ldr q27, [x23, x26]\n"
-    "umax v8.16b, v8.16b, v31.16b\n"
+    "umax v3.16b, v3.16b, v30.16b\n"
+    "umax v6.16b, v6.16b, v27.16b\n"
     "ldr q25, [x23, x25]\n"
-    "umax v7.16b, v7.16b, v27.16b\n"
-    "umax v6.16b, v6.16b, v25.16b\n"
+    "umax v5.16b, v5.16b, v25.16b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "uxtl v17.8h, v4.8b\n"
     "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
-    "ld1r { v5.4s }, [x19]\n"
-    "uxtl2 v16.8h, v4.16b\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "uxtl v21.8h, v8.8b\n"
     "ld1r { v4.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    "uxtl2 v20.8h, v8.16b\n"
-    "ld1r { v3.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    "uxtl v19.8h, v7.8b\n"
-    "ld1r { v2.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "uxtl v23.8h, v7.8b\n"
     "uxtl2 v24.8h, v7.16b\n"
+    "uxtl v22.8h, v3.8b\n"
+    "uxtl2 v21.8h, v3.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+    "ld1r { v3.4s }, [x19]\n"
+    "uxtl v20.8h, v6.8b\n"
+    "uxtl2 v17.8h, v6.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1r { v2.4s }, [x19]\n"
+    "uxtl v19.8h, v5.8b\n"
+    "uxtl2 v18.8h, v5.16b\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
     "ld1r { v1.4s }, [x19]\n"
+    "neg v4.4s, v4.4s\n"
+    "saddw v0.4s, v4.4s, v23.4h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "ld1r { v16.4s }, [x19]\n"
+    "saddw2 v23.4s, v4.4s, v23.8h\n"
+    "saddw v31.4s, v4.4s, v24.4h\n"
     "sub %x[n_channels], %x[n_channels], #0x40\n"
-    "uxtl v0.8h, v6.8b\n"
     "cmp %x[n_channels], #0x40\n"
-    "uxtl2 v31.8h, v6.16b\n"
-    "neg v5.4s, v5.4s\n"
-    "movi v30.4s, #0x0\n"
-    "movi v29.4s, #0xff\n"
-    "saddw v23.4s, v5.4s, v17.4h\n"
-    "saddw2 v18.4s, v5.4s, v17.8h\n"
-    "saddw v17.4s, v5.4s, v16.4h\n"
-    "saddw2 v16.4s, v5.4s, v16.8h\n"
-    "saddw v22.4s, v5.4s, v21.4h\n"
-    "saddw2 v21.4s, v5.4s, v21.8h\n"
-    "saddw v28.4s, v5.4s, v20.4h\n"
-    "saddw2 v20.4s, v5.4s, v20.8h\n"
-    "saddw v27.4s, v5.4s, v19.4h\n"
-    "saddw2 v19.4s, v5.4s, v19.8h\n"
+    "saddw2 v30.4s, v4.4s, v24.8h\n"
+    "saddw v29.4s, v4.4s, v22.4h\n"
+    "saddw2 v22.4s, v4.4s, v22.8h\n"
+    "saddw v28.4s, v4.4s, v21.4h\n"
+    "saddw2 v21.4s, v4.4s, v21.8h\n"
+    "saddw v27.4s, v4.4s, v20.4h\n"
+    "saddw2 v20.4s, v4.4s, v20.8h\n"
+    "saddw v26.4s, v4.4s, v17.4h\n"
+    "saddw2 v17.4s, v4.4s, v17.8h\n"
+    "saddw v25.4s, v4.4s, v19.4h\n"
+    "saddw2 v19.4s, v4.4s, v19.8h\n"
+    "saddw v24.4s, v4.4s, v18.4h\n"
+    "saddw2 v18.4s, v4.4s, v18.8h\n"
+    "srshl v0.4s, v0.4s, v3.4s\n"
     "srshl v23.4s, v23.4s, v3.4s\n"
-    "srshl v18.4s, v18.4s, v3.4s\n"
-    "srshl v17.4s, v17.4s, v3.4s\n"
-    "srshl v16.4s, v16.4s, v3.4s\n"
-    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
-    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
-    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
-    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
-    "srshl v23.4s, v23.4s, v2.4s\n"
-    "srshl v18.4s, v18.4s, v2.4s\n"
-    "srshl v17.4s, v17.4s, v2.4s\n"
-    "srshl v16.4s, v16.4s, v2.4s\n"
+    "srshl v31.4s, v31.4s, v3.4s\n"
+    "srshl v30.4s, v30.4s, v3.4s\n"
+    "srshl v29.4s, v29.4s, v3.4s\n"
     "srshl v22.4s, v22.4s, v3.4s\n"
-    "srshl v21.4s, v21.4s, v3.4s\n"
     "srshl v28.4s, v28.4s, v3.4s\n"
-    "srshl v20.4s, v20.4s, v3.4s\n"
-    "sqrdmulh v22.4s, v22.4s, v4.4s\n"
-    "sqrdmulh v21.4s, v21.4s, v4.4s\n"
-    "sqrdmulh v28.4s, v28.4s, v4.4s\n"
-    "sqrdmulh v20.4s, v20.4s, v4.4s\n"
-    "srshl v22.4s, v22.4s, v2.4s\n"
-    "srshl v21.4s, v21.4s, v2.4s\n"
-    "srshl v28.4s, v28.4s, v2.4s\n"
-    "srshl v20.4s, v20.4s, v2.4s\n"
-    "srshl v27.4s, v27.4s, v3.4s\n"
-    "srshl v19.4s, v19.4s, v3.4s\n"
-    "add v23.4s, v23.4s, v1.4s\n"
-    "add v18.4s, v18.4s, v1.4s\n"
-    "sqrdmulh v27.4s, v27.4s, v4.4s\n"
-    "sqrdmulh v19.4s, v19.4s, v4.4s\n"
-    "add v17.4s, v17.4s, v1.4s\n"
-    "add v16.4s, v16.4s, v1.4s\n"
-    "srshl v27.4s, v27.4s, v2.4s\n"
-    "srshl v19.4s, v19.4s, v2.4s\n"
-    "add v22.4s, v22.4s, v1.4s\n"
-    "add v21.4s, v21.4s, v1.4s\n"
-    "add v28.4s, v28.4s, v1.4s\n"
-    "add v20.4s, v20.4s, v1.4s\n"
-    "add v27.4s, v27.4s, v1.4s\n"
-    "add v19.4s, v19.4s, v1.4s\n"
-    "smax v23.4s, v23.4s, v30.4s\n"
-    "smax v18.4s, v18.4s, v30.4s\n"
-    "smax v17.4s, v17.4s, v30.4s\n"
-    "smin v23.4s, v23.4s, v29.4s\n"
-    "smin v18.4s, v18.4s, v29.4s\n"
-    "smin v17.4s, v17.4s, v29.4s\n"
-    "smax v16.4s, v16.4s, v30.4s\n"
-    "smax v22.4s, v22.4s, v30.4s\n"
-    "smax v21.4s, v21.4s, v30.4s\n"
-    "smin v16.4s, v16.4s, v29.4s\n"
-    "smin v22.4s, v22.4s, v29.4s\n"
-    "smin v21.4s, v21.4s, v29.4s\n"
-    "smax v28.4s, v28.4s, v30.4s\n"
-    "smax v20.4s, v20.4s, v30.4s\n"
-    "smax v27.4s, v27.4s, v30.4s\n"
-    "smin v28.4s, v28.4s, v29.4s\n"
-    "smin v20.4s, v20.4s, v29.4s\n"
-    "smin v27.4s, v27.4s, v29.4s\n"
-    "smax v19.4s, v19.4s, v30.4s\n"
-    "uzp1 v26.16b, v23.16b, v18.16b\n"
-    "saddw v25.4s, v5.4s, v24.4h\n"
-    "saddw2 v18.4s, v5.4s, v24.8h\n"
-    "smin v19.4s, v19.4s, v29.4s\n"
-    "srshl v25.4s, v25.4s, v3.4s\n"
-    "srshl v18.4s, v18.4s, v3.4s\n"
-    "uzp1 v24.16b, v17.16b, v16.16b\n"
-    "saddw v17.4s, v5.4s, v0.4h\n"
-    "saddw2 v16.4s, v5.4s, v0.8h\n"
-    "sqrdmulh v25.4s, v25.4s, v4.4s\n"
-    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
-    "srshl v17.4s, v17.4s, v3.4s\n"
-    "srshl v16.4s, v16.4s, v3.4s\n"
-    "srshl v25.4s, v25.4s, v2.4s\n"
-    "srshl v18.4s, v18.4s, v2.4s\n"
-    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
-    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
-    "add v25.4s, v25.4s, v1.4s\n"
-    "add v18.4s, v18.4s, v1.4s\n"
-    "srshl v17.4s, v17.4s, v2.4s\n"
-    "srshl v16.4s, v16.4s, v2.4s\n"
-    "smax v25.4s, v25.4s, v30.4s\n"
-    "smax v18.4s, v18.4s, v30.4s\n"
-    "add v17.4s, v17.4s, v1.4s\n"
-    "add v16.4s, v16.4s, v1.4s\n"
-    "smin v25.4s, v25.4s, v29.4s\n"
-    "smin v18.4s, v18.4s, v29.4s\n"
-    "smax v17.4s, v17.4s, v30.4s\n"
-    "smax v16.4s, v16.4s, v30.4s\n"
-    "uzp1 v23.16b, v22.16b, v21.16b\n"
-    "saddw v22.4s, v5.4s, v31.4h\n"
-    "saddw2 v21.4s, v5.4s, v31.8h\n"
-    "smin v17.4s, v17.4s, v29.4s\n"
-    "srshl v22.4s, v22.4s, v3.4s\n"
     "srshl v21.4s, v21.4s, v3.4s\n"
-    "smin v16.4s, v16.4s, v29.4s\n"
-    "uzp1 v20.16b, v28.16b, v20.16b\n"
-    "sqrdmulh v22.4s, v22.4s, v4.4s\n"
-    "sqrdmulh v21.4s, v21.4s, v4.4s\n"
-    "uzp1 v19.16b, v27.16b, v19.16b\n"
-    "uzp1 v18.16b, v25.16b, v18.16b\n"
-    "srshl v22.4s, v22.4s, v2.4s\n"
-    "srshl v21.4s, v21.4s, v2.4s\n"
-    "uzp1 v17.16b, v17.16b, v16.16b\n"
-    "uzp1 v16.16b, v26.16b, v24.16b\n"
+    "srshl v27.4s, v27.4s, v3.4s\n"
+    "srshl v20.4s, v20.4s, v3.4s\n"
+    "srshl v26.4s, v26.4s, v3.4s\n"
+    "srshl v17.4s, v17.4s, v3.4s\n"
+    "srshl v25.4s, v25.4s, v3.4s\n"
+    "srshl v19.4s, v19.4s, v3.4s\n"
+    "srshl v24.4s, v24.4s, v3.4s\n"
+    "srshl v18.4s, v18.4s, v3.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v2.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v2.4s\n"
+    "sqrdmulh v30.4s, v30.4s, v2.4s\n"
+    "sqrdmulh v29.4s, v29.4s, v2.4s\n"
+    "sqrdmulh v22.4s, v22.4s, v2.4s\n"
+    "sqrdmulh v28.4s, v28.4s, v2.4s\n"
+    "sqrdmulh v21.4s, v21.4s, v2.4s\n"
+    "sqrdmulh v27.4s, v27.4s, v2.4s\n"
+    "sqrdmulh v20.4s, v20.4s, v2.4s\n"
+    "sqrdmulh v26.4s, v26.4s, v2.4s\n"
+    "sqrdmulh v17.4s, v17.4s, v2.4s\n"
+    "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+    "sqrdmulh v19.4s, v19.4s, v2.4s\n"
+    "sqrdmulh v24.4s, v24.4s, v2.4s\n"
+    "sqrdmulh v18.4s, v18.4s, v2.4s\n"
+    "srshl v0.4s, v0.4s, v1.4s\n"
+    "srshl v23.4s, v23.4s, v1.4s\n"
+    "srshl v31.4s, v31.4s, v1.4s\n"
+    "srshl v30.4s, v30.4s, v1.4s\n"
+    "srshl v29.4s, v29.4s, v1.4s\n"
+    "srshl v22.4s, v22.4s, v1.4s\n"
+    "srshl v28.4s, v28.4s, v1.4s\n"
+    "srshl v21.4s, v21.4s, v1.4s\n"
+    "srshl v27.4s, v27.4s, v1.4s\n"
+    "srshl v20.4s, v20.4s, v1.4s\n"
+    "srshl v26.4s, v26.4s, v1.4s\n"
+    "srshl v17.4s, v17.4s, v1.4s\n"
+    "srshl v25.4s, v25.4s, v1.4s\n"
+    "srshl v19.4s, v19.4s, v1.4s\n"
+    "srshl v24.4s, v24.4s, v1.4s\n"
+    "srshl v18.4s, v18.4s, v1.4s\n"
+    "add v0.4s, v0.4s, v16.4s\n"
+    "add v23.4s, v23.4s, v16.4s\n"
+    "add v31.4s, v31.4s, v16.4s\n"
+    "add v30.4s, v30.4s, v16.4s\n"
+    "add v29.4s, v29.4s, v16.4s\n"
+    "add v22.4s, v22.4s, v16.4s\n"
+    "add v28.4s, v28.4s, v16.4s\n"
+    "add v21.4s, v21.4s, v16.4s\n"
+    "add v27.4s, v27.4s, v16.4s\n"
+    "add v20.4s, v20.4s, v16.4s\n"
+    "add v26.4s, v26.4s, v16.4s\n"
+    "add v17.4s, v17.4s, v16.4s\n"
+    "add v25.4s, v25.4s, v16.4s\n"
+    "add v19.4s, v19.4s, v16.4s\n"
+    "add v24.4s, v24.4s, v16.4s\n"
+    "add v18.4s, v18.4s, v16.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smax v23.4s, v23.4s, v16.4s\n"
+    "smax v31.4s, v31.4s, v16.4s\n"
+    "smax v30.4s, v30.4s, v16.4s\n"
+    "smax v29.4s, v29.4s, v16.4s\n"
+    "smax v22.4s, v22.4s, v16.4s\n"
+    "smax v28.4s, v28.4s, v16.4s\n"
+    "smax v21.4s, v21.4s, v16.4s\n"
+    "smax v27.4s, v27.4s, v16.4s\n"
+    "smax v20.4s, v20.4s, v16.4s\n"
+    "smax v26.4s, v26.4s, v16.4s\n"
+    "smax v17.4s, v17.4s, v16.4s\n"
+    "smax v25.4s, v25.4s, v16.4s\n"
+    "smax v19.4s, v19.4s, v16.4s\n"
+    "smax v24.4s, v24.4s, v16.4s\n"
+    "smax v18.4s, v18.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v0.4s, v0.4s, v16.4s\n"
+    "smin v23.4s, v23.4s, v16.4s\n"
+    "smin v31.4s, v31.4s, v16.4s\n"
+    "smin v30.4s, v30.4s, v16.4s\n"
+    "smin v29.4s, v29.4s, v16.4s\n"
+    "smin v22.4s, v22.4s, v16.4s\n"
+    "smin v28.4s, v28.4s, v16.4s\n"
+    "smin v21.4s, v21.4s, v16.4s\n"
+    "smin v27.4s, v27.4s, v16.4s\n"
+    "smin v20.4s, v20.4s, v16.4s\n"
+    "smin v26.4s, v26.4s, v16.4s\n"
+    "smin v17.4s, v17.4s, v16.4s\n"
+    "smin v25.4s, v25.4s, v16.4s\n"
+    "smin v19.4s, v19.4s, v16.4s\n"
+    "smin v24.4s, v24.4s, v16.4s\n"
+    "smin v18.4s, v18.4s, v16.4s\n"
+    "uzp1 v23.16b, v0.16b, v23.16b\n"
+    "uzp1 v16.16b, v31.16b, v30.16b\n"
+    "uzp1 v22.16b, v29.16b, v22.16b\n"
+    "uzp1 v21.16b, v28.16b, v21.16b\n"
+    "uzp1 v20.16b, v27.16b, v20.16b\n"
+    "uzp1 v17.16b, v26.16b, v17.16b\n"
+    "uzp1 v19.16b, v25.16b, v19.16b\n"
+    "uzp1 v18.16b, v24.16b, v18.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
     "str q16, [%x[outptr], x28]\n"
-    "add v22.4s, v22.4s, v1.4s\n"
     "add x28, x28, #0x40\n"
-    "add v21.4s, v21.4s, v1.4s\n"
-    "uzp1 v16.16b, v23.16b, v20.16b\n"
-    "str q16, [%x[outptr], x27]\n"
-    "smax v22.4s, v22.4s, v30.4s\n"
-    "add x27, x27, #0x40\n"
-    "smax v21.4s, v21.4s, v30.4s\n"
-    "uzp1 v16.16b, v19.16b, v18.16b\n"
-    "str q16, [%x[outptr], x26]\n"
-    "smin v22.4s, v22.4s, v29.4s\n"
-    "add x26, x26, #0x40\n"
-    "smin v21.4s, v21.4s, v29.4s\n"
     "uzp1 v16.16b, v22.16b, v21.16b\n"
-    "uzp1 v16.16b, v17.16b, v16.16b\n"
+    "uzp1 v17.16b, v20.16b, v17.16b\n"
+    "str q16, [%x[outptr], x27]\n"
+    "add x27, x27, #0x40\n"
+    "uzp1 v16.16b, v19.16b, v18.16b\n"
+    "str q17, [%x[outptr], x26]\n"
+    "add x26, x26, #0x40\n"
     "str q16, [%x[outptr], x25]\n"
     "add x25, x25, #0x40\n"
     "bge 1b\n"
@@ -309,316 +309,316 @@
     "cmp %x[n_channels], #0x10\n"
     "blt 14f\n"
     "8:"  // Single vector of channels: Loop
-    "movi v4.16b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "movi v7.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "umax v23.16b, v3.16b, v2.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "ldr q3, [x23, x28]\n"
-    "ldr q2, [x22, x28]\n"
-    "umax v4.16b, v4.16b, v19.16b\n"
-    "ldr q1, [x21, x28]\n"
-    "ldr q0, [x20, x28]\n"
+    "subs x24, x24, #0x1\n"
+    "umax v7.16b, v7.16b, v19.16b\n"
+    "add x19, x19, #0x20\n"
+    "ldr q2, [x23, x28]\n"
+    "ldr q1, [x22, x28]\n"
+    "ldr q0, [x21, x28]\n"
+    "ldr q31, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "umax v23.16b, v3.16b, v2.16b\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "umax v4.16b, v4.16b, v19.16b\n"
+    "umax v7.16b, v7.16b, v19.16b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ldr q2, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ldr q3, [x23, x28]\n"
-    "umax v4.16b, v4.16b, v3.16b\n"
+    "umax v7.16b, v7.16b, v2.16b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "uxtl v17.8h, v4.8b\n"
     "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
-    "ld1r { v5.4s }, [x19]\n"
-    "uxtl2 v16.8h, v4.16b\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "movi v30.4s, #0x0\n"
     "ld1r { v4.4s }, [x19]\n"
+    "uxtl v23.8h, v7.8b\n"
+    "uxtl2 v24.8h, v7.16b\n"
+    "neg v4.4s, v4.4s\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    "movi v29.4s, #0xff\n"
+    "saddw v0.4s, v4.4s, v23.4h\n"
     "ld1r { v3.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    "neg v5.4s, v5.4s\n"
+    "saddw2 v23.4s, v4.4s, v23.8h\n"
+    "saddw v31.4s, v4.4s, v24.4h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
     "ld1r { v2.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "saddw v23.4s, v5.4s, v17.4h\n"
+    "saddw2 v30.4s, v4.4s, v24.8h\n"
+    "srshl v0.4s, v0.4s, v3.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
     "ld1r { v1.4s }, [x19]\n"
-    "sub %x[n_channels], %x[n_channels], #0x10\n"
-    "saddw2 v18.4s, v5.4s, v17.8h\n"
-    "cmp %x[n_channels], #0x10\n"
-    "saddw v17.4s, v5.4s, v16.4h\n"
-    "saddw2 v16.4s, v5.4s, v16.8h\n"
     "srshl v23.4s, v23.4s, v3.4s\n"
-    "srshl v18.4s, v18.4s, v3.4s\n"
-    "srshl v17.4s, v17.4s, v3.4s\n"
-    "srshl v16.4s, v16.4s, v3.4s\n"
-    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
-    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
-    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
-    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
-    "srshl v23.4s, v23.4s, v2.4s\n"
-    "srshl v18.4s, v18.4s, v2.4s\n"
-    "srshl v17.4s, v17.4s, v2.4s\n"
-    "srshl v16.4s, v16.4s, v2.4s\n"
-    "add v23.4s, v23.4s, v1.4s\n"
-    "add v18.4s, v18.4s, v1.4s\n"
-    "add v17.4s, v17.4s, v1.4s\n"
-    "add v16.4s, v16.4s, v1.4s\n"
-    "smax v23.4s, v23.4s, v30.4s\n"
-    "smax v18.4s, v18.4s, v30.4s\n"
-    "smax v17.4s, v17.4s, v30.4s\n"
-    "smin v23.4s, v23.4s, v29.4s\n"
-    "smin v18.4s, v18.4s, v29.4s\n"
-    "smin v17.4s, v17.4s, v29.4s\n"
-    "smax v16.4s, v16.4s, v30.4s\n"
-    "uzp1 v26.16b, v23.16b, v18.16b\n"
-    "smin v16.4s, v16.4s, v29.4s\n"
-    "uzp1 v24.16b, v17.16b, v16.16b\n"
-    "uzp1 v16.16b, v26.16b, v24.16b\n"
+    "srshl v31.4s, v31.4s, v3.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "ld1r { v16.4s }, [x19]\n"
+    "srshl v30.4s, v30.4s, v3.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v2.4s\n"
+    "sub %x[n_channels], %x[n_channels], #0x10\n"
+    "cmp %x[n_channels], #0x10\n"
+    "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v2.4s\n"
+    "sqrdmulh v30.4s, v30.4s, v2.4s\n"
+    "srshl v0.4s, v0.4s, v1.4s\n"
+    "srshl v23.4s, v23.4s, v1.4s\n"
+    "srshl v31.4s, v31.4s, v1.4s\n"
+    "srshl v30.4s, v30.4s, v1.4s\n"
+    "add v0.4s, v0.4s, v16.4s\n"
+    "add v23.4s, v23.4s, v16.4s\n"
+    "add v31.4s, v31.4s, v16.4s\n"
+    "add v30.4s, v30.4s, v16.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smax v23.4s, v23.4s, v16.4s\n"
+    "smax v31.4s, v31.4s, v16.4s\n"
+    "smax v30.4s, v30.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v0.4s, v0.4s, v16.4s\n"
+    "smin v23.4s, v23.4s, v16.4s\n"
+    "smin v31.4s, v31.4s, v16.4s\n"
+    "smin v30.4s, v30.4s, v16.4s\n"
+    "uzp1 v23.16b, v0.16b, v23.16b\n"
+    "uzp1 v16.16b, v31.16b, v30.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
     "str q16, [%x[outptr], x28]\n"
     "add x28, x28, #0x10\n"
     "bge 8b\n"
     "cbz %x[n_channels], 43f\n"
     "14:"  // Oddments
-    "movi v4.16b, #0x0\n"
-    "add %x[outptr], %x[outptr], x28\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "add %x[outptr], %x[outptr], x28\n"
+    "movi v7.16b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 24f\n"
     "15:"  // Oddments: 4 inputs loop
-    "movi v3.16b, #0x0\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "add x23, x23, x28\n"
-    "movi v2.16b, #0x0\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "movi v1.16b, #0x0\n"
     "add x19, x19, #0x20\n"
-    "movi v0.16b, #0x0\n"
+    "add x23, x23, x28\n"
     "add x22, x22, x28\n"
     "add x21, x21, x28\n"
+    "movi v2.16b, #0x0\n"
+    "movi v1.16b, #0x0\n"
     "add x20, x20, x28\n"
+    "movi v0.16b, #0x0\n"
+    "movi v31.16b, #0x0\n"
     "tbz %x[n_channels], #3, 19f\n"
-    "ldr d3, [x23], #0x8\n"
-    "ldr d2, [x22], #0x8\n"
-    "ldr d1, [x21], #0x8\n"
-    "ldr d0, [x20], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
+    "ldr d1, [x22], #0x8\n"
+    "ldr d0, [x21], #0x8\n"
+    "ldr d31, [x20], #0x8\n"
     "tbz %x[n_channels], #2, 17f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
-    "ld1 { v2.s }[2], [x22], #0x4\n"
-    "ld1 { v1.s }[2], [x21], #0x4\n"
-    "ld1 { v0.s }[2], [x20], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
+    "ld1 { v1.s }[2], [x22], #0x4\n"
+    "ld1 { v0.s }[2], [x21], #0x4\n"
+    "ld1 { v31.s }[2], [x20], #0x4\n"
     "tbz %x[n_channels], #1, 16f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
-    "ld1 { v2.h }[6], [x22], #0x2\n"
-    "ld1 { v1.h }[6], [x21], #0x2\n"
-    "ld1 { v0.h }[6], [x20], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
+    "ld1 { v1.h }[6], [x22], #0x2\n"
+    "ld1 { v0.h }[6], [x21], #0x2\n"
+    "ld1 { v31.h }[6], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
-    "ld1 { v2.b }[14], [x22], #0x1\n"
-    "ld1 { v1.b }[14], [x21], #0x1\n"
-    "ld1 { v0.b }[14], [x20], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
+    "ld1 { v1.b }[14], [x22], #0x1\n"
+    "ld1 { v0.b }[14], [x21], #0x1\n"
+    "ld1 { v31.b }[14], [x20], #0x1\n"
     "b 23f\n"
     "16:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
-    "ld1 { v2.b }[12], [x22], #0x1\n"
-    "ld1 { v1.b }[12], [x21], #0x1\n"
-    "ld1 { v0.b }[12], [x20], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
+    "ld1 { v1.b }[12], [x22], #0x1\n"
+    "ld1 { v0.b }[12], [x21], #0x1\n"
+    "ld1 { v31.b }[12], [x20], #0x1\n"
     "b 23f\n"
     "17:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 18f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
-    "ld1 { v2.h }[4], [x22], #0x2\n"
-    "ld1 { v1.h }[4], [x21], #0x2\n"
-    "ld1 { v0.h }[4], [x20], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
+    "ld1 { v1.h }[4], [x22], #0x2\n"
+    "ld1 { v0.h }[4], [x21], #0x2\n"
+    "ld1 { v31.h }[4], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
-    "ld1 { v2.b }[10], [x22], #0x1\n"
-    "ld1 { v1.b }[10], [x21], #0x1\n"
-    "ld1 { v0.b }[10], [x20], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
+    "ld1 { v1.b }[10], [x22], #0x1\n"
+    "ld1 { v0.b }[10], [x21], #0x1\n"
+    "ld1 { v31.b }[10], [x20], #0x1\n"
     "b 23f\n"
     "18:"  // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
-    "ld1 { v2.b }[8], [x22], #0x1\n"
-    "ld1 { v1.b }[8], [x21], #0x1\n"
-    "ld1 { v0.b }[8], [x20], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
+    "ld1 { v1.b }[8], [x22], #0x1\n"
+    "ld1 { v0.b }[8], [x21], #0x1\n"
+    "ld1 { v31.b }[8], [x20], #0x1\n"
     "b 23f\n"
     "19:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 21f\n"
-    "ldr s3, [x23], #0x4\n"
-    "ldr s2, [x22], #0x4\n"
-    "ldr s1, [x21], #0x4\n"
-    "ldr s0, [x20], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
+    "ldr s1, [x22], #0x4\n"
+    "ldr s0, [x21], #0x4\n"
+    "ldr s31, [x20], #0x4\n"
     "tbz %x[n_channels], #1, 20f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
-    "ld1 { v2.h }[2], [x22], #0x2\n"
-    "ld1 { v1.h }[2], [x21], #0x2\n"
-    "ld1 { v0.h }[2], [x20], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
+    "ld1 { v1.h }[2], [x22], #0x2\n"
+    "ld1 { v0.h }[2], [x21], #0x2\n"
+    "ld1 { v31.h }[2], [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
-    "ld1 { v2.b }[6], [x22], #0x1\n"
-    "ld1 { v1.b }[6], [x21], #0x1\n"
-    "ld1 { v0.b }[6], [x20], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
+    "ld1 { v1.b }[6], [x22], #0x1\n"
+    "ld1 { v0.b }[6], [x21], #0x1\n"
+    "ld1 { v31.b }[6], [x20], #0x1\n"
     "b 23f\n"
     "20:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
-    "ld1 { v2.b }[4], [x22], #0x1\n"
-    "ld1 { v1.b }[4], [x21], #0x1\n"
-    "ld1 { v0.b }[4], [x20], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
+    "ld1 { v1.b }[4], [x22], #0x1\n"
+    "ld1 { v0.b }[4], [x21], #0x1\n"
+    "ld1 { v31.b }[4], [x20], #0x1\n"
     "b 23f\n"
     "21:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 22f\n"
-    "ldr h3, [x23], #0x2\n"
-    "ldr h2, [x22], #0x2\n"
-    "ldr h1, [x21], #0x2\n"
-    "ldr h0, [x20], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
+    "ldr h1, [x22], #0x2\n"
+    "ldr h0, [x21], #0x2\n"
+    "ldr h31, [x20], #0x2\n"
     "tbz %x[n_channels], #0, 23f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
-    "ld1 { v2.b }[2], [x22], #0x1\n"
-    "ld1 { v1.b }[2], [x21], #0x1\n"
-    "ld1 { v0.b }[2], [x20], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
+    "ld1 { v1.b }[2], [x22], #0x1\n"
+    "ld1 { v0.b }[2], [x21], #0x1\n"
+    "ld1 { v31.b }[2], [x20], #0x1\n"
     "b 23f\n"
     "22:"  // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 23f\n"
-    "ldr b3, [x23], #0x1\n"
-    "ldr b2, [x22], #0x1\n"
-    "ldr b1, [x21], #0x1\n"
-    "ldr b0, [x20], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
+    "ldr b1, [x22], #0x1\n"
+    "ldr b0, [x21], #0x1\n"
+    "ldr b31, [x20], #0x1\n"
     "23:"  // Oddments: 4 inputs loop: Load: Bit 3: End
-    "umax v23.16b, v3.16b, v2.16b\n"
+    "umax v23.16b, v2.16b, v1.16b\n"
+    "umax v19.16b, v0.16b, v31.16b\n"
     "subs x24, x24, #0x1\n"
-    "umax v19.16b, v1.16b, v0.16b\n"
     "umax v19.16b, v23.16b, v19.16b\n"
-    "umax v4.16b, v4.16b, v19.16b\n"
+    "umax v7.16b, v7.16b, v19.16b\n"
     "bgt 15b\n"
     "24:"  // Oddments: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 34f\n"
     "25:"  // Oddments: Single input loop
-    "movi v3.16b, #0x0\n"
     "ldr x23, [x19], #0x8\n"
     "add x23, x23, x28\n"
+    "movi v2.16b, #0x0\n"
     "tbz %x[n_channels], #3, 29f\n"
-    "ldr d3, [x23], #0x8\n"
+    "ldr d2, [x23], #0x8\n"
     "tbz %x[n_channels], #2, 27f\n"
-    "ld1 { v3.s }[2], [x23], #0x4\n"
+    "ld1 { v2.s }[2], [x23], #0x4\n"
     "tbz %x[n_channels], #1, 26f\n"
-    "ld1 { v3.h }[6], [x23], #0x2\n"
+    "ld1 { v2.h }[6], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[14], [x23], #0x1\n"
+    "ld1 { v2.b }[14], [x23], #0x1\n"
     "b 33f\n"
     "26:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[12], [x23], #0x1\n"
+    "ld1 { v2.b }[12], [x23], #0x1\n"
     "b 33f\n"
     "27:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
     "tbz %x[n_channels], #1, 28f\n"
-    "ld1 { v3.h }[4], [x23], #0x2\n"
+    "ld1 { v2.h }[4], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[10], [x23], #0x1\n"
+    "ld1 { v2.b }[10], [x23], #0x1\n"
     "b 33f\n"
     "28:"  // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[8], [x23], #0x1\n"
+    "ld1 { v2.b }[8], [x23], #0x1\n"
     "b 33f\n"
     "29:"  // Oddments: Single input loop: Load: Bit 3: Unset
     "tbz %x[n_channels], #2, 31f\n"
-    "ldr s3, [x23], #0x4\n"
+    "ldr s2, [x23], #0x4\n"
     "tbz %x[n_channels], #1, 30f\n"
-    "ld1 { v3.h }[2], [x23], #0x2\n"
+    "ld1 { v2.h }[2], [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[6], [x23], #0x1\n"
+    "ld1 { v2.b }[6], [x23], #0x1\n"
     "b 33f\n"
     "30:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[4], [x23], #0x1\n"
+    "ld1 { v2.b }[4], [x23], #0x1\n"
     "b 33f\n"
     "31:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
     "tbz %x[n_channels], #1, 32f\n"
-    "ldr h3, [x23], #0x2\n"
+    "ldr h2, [x23], #0x2\n"
     "tbz %x[n_channels], #0, 33f\n"
-    "ld1 { v3.b }[2], [x23], #0x1\n"
+    "ld1 { v2.b }[2], [x23], #0x1\n"
     "b 33f\n"
     "32:"  // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
     "tbz %x[n_channels], #0, 33f\n"
-    "ldr b3, [x23], #0x1\n"
+    "ldr b2, [x23], #0x1\n"
     "33:"  // Oddments: Single input loop: Load: Bit 3: End
-    "umax v4.16b, v4.16b, v3.16b\n"
     "subs x20, x20, #0x1\n"
+    "umax v7.16b, v7.16b, v2.16b\n"
     "bgt 25b\n"
     "34:"  // Oddments: Single input loop: End
-    "uxtl v17.8h, v4.8b\n"
     "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
-    "ld1r { v5.4s }, [x19]\n"
-    "uxtl2 v16.8h, v4.16b\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "movi v30.4s, #0x0\n"
     "ld1r { v4.4s }, [x19]\n"
+    "uxtl v23.8h, v7.8b\n"
+    "uxtl2 v24.8h, v7.16b\n"
+    "neg v4.4s, v4.4s\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    "movi v29.4s, #0xff\n"
+    "saddw v0.4s, v4.4s, v23.4h\n"
     "ld1r { v3.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    "neg v5.4s, v5.4s\n"
+    "saddw2 v23.4s, v4.4s, v23.8h\n"
+    "saddw v31.4s, v4.4s, v24.4h\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
     "ld1r { v2.4s }, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "saddw v23.4s, v5.4s, v17.4h\n"
+    "saddw2 v30.4s, v4.4s, v24.8h\n"
+    "srshl v0.4s, v0.4s, v3.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
     "ld1r { v1.4s }, [x19]\n"
-    "saddw2 v18.4s, v5.4s, v17.8h\n"
-    "saddw v17.4s, v5.4s, v16.4h\n"
-    "saddw2 v16.4s, v5.4s, v16.8h\n"
     "srshl v23.4s, v23.4s, v3.4s\n"
-    "srshl v18.4s, v18.4s, v3.4s\n"
-    "srshl v17.4s, v17.4s, v3.4s\n"
-    "srshl v16.4s, v16.4s, v3.4s\n"
-    "sqrdmulh v23.4s, v23.4s, v4.4s\n"
-    "sqrdmulh v18.4s, v18.4s, v4.4s\n"
-    "sqrdmulh v17.4s, v17.4s, v4.4s\n"
-    "sqrdmulh v16.4s, v16.4s, v4.4s\n"
-    "srshl v23.4s, v23.4s, v2.4s\n"
-    "srshl v18.4s, v18.4s, v2.4s\n"
-    "srshl v17.4s, v17.4s, v2.4s\n"
-    "srshl v16.4s, v16.4s, v2.4s\n"
-    "add v23.4s, v23.4s, v1.4s\n"
-    "add v18.4s, v18.4s, v1.4s\n"
-    "add v17.4s, v17.4s, v1.4s\n"
-    "add v16.4s, v16.4s, v1.4s\n"
-    "smax v23.4s, v23.4s, v30.4s\n"
-    "smax v18.4s, v18.4s, v30.4s\n"
-    "smax v17.4s, v17.4s, v30.4s\n"
-    "smin v23.4s, v23.4s, v29.4s\n"
-    "smin v18.4s, v18.4s, v29.4s\n"
-    "smin v17.4s, v17.4s, v29.4s\n"
-    "smax v16.4s, v16.4s, v30.4s\n"
-    "uzp1 v26.16b, v23.16b, v18.16b\n"
-    "smin v16.4s, v16.4s, v29.4s\n"
-    "uzp1 v24.16b, v17.16b, v16.16b\n"
-    "uzp1 v16.16b, v26.16b, v24.16b\n"
+    "srshl v31.4s, v31.4s, v3.4s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+    "ld1r { v16.4s }, [x19]\n"
+    "srshl v30.4s, v30.4s, v3.4s\n"
+    "sqrdmulh v0.4s, v0.4s, v2.4s\n"
+    "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+    "sqrdmulh v31.4s, v31.4s, v2.4s\n"
+    "sqrdmulh v30.4s, v30.4s, v2.4s\n"
+    "srshl v0.4s, v0.4s, v1.4s\n"
+    "srshl v23.4s, v23.4s, v1.4s\n"
+    "srshl v31.4s, v31.4s, v1.4s\n"
+    "srshl v30.4s, v30.4s, v1.4s\n"
+    "add v0.4s, v0.4s, v16.4s\n"
+    "add v23.4s, v23.4s, v16.4s\n"
+    "add v31.4s, v31.4s, v16.4s\n"
+    "add v30.4s, v30.4s, v16.4s\n"
+    "movi v16.4s, #0x0\n"
+    "smax v0.4s, v0.4s, v16.4s\n"
+    "smax v23.4s, v23.4s, v16.4s\n"
+    "smax v31.4s, v31.4s, v16.4s\n"
+    "smax v30.4s, v30.4s, v16.4s\n"
+    "movi v16.4s, #0xff\n"
+    "smin v0.4s, v0.4s, v16.4s\n"
+    "smin v23.4s, v23.4s, v16.4s\n"
+    "smin v31.4s, v31.4s, v16.4s\n"
+    "smin v30.4s, v30.4s, v16.4s\n"
+    "uzp1 v23.16b, v0.16b, v23.16b\n"
+    "uzp1 v16.16b, v31.16b, v30.16b\n"
+    "uzp1 v16.16b, v23.16b, v16.16b\n"
     "tbz %x[n_channels], #3, 38f\n"
     "st1 { v16.d }[0], [%x[outptr]], #0x8\n"
     "tbz %x[n_channels], #2, 36f\n"
@@ -664,16 +664,13 @@
     "tbz %x[n_channels], #0, 42f\n"
     "st1 { v16.b }[0], [%x[outptr]], #0x1\n"
     "42:"  // Oddments: Store: Bit 3: End
-
     "43:"  // End
-
     : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
     : [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
-    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
-
-#endif  // defined(__aarch64__)
+#endif // defined(__aarch64__)
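Note on the hunk above: the re-interleaving only reorders loads against the requantisation arithmetic; the per-layer Requantize32 epilogue itself (offset removal, left shift, fixed-point multiply, right shift, output offset, clamp, narrow) is unchanged. As a rough scalar model of that path — the helper below is hypothetical and not part of this patch, and per_layer_right_shift is assumed to hold a negative count, since srshl with a negative shift amount performs a rounding shift right:

    #include <algorithm>
    #include <cstdint>

    // Hypothetical scalar model of the per-layer requantisation epilogue above.
    // Values stay in 32-bit lanes, as they do in the v0/v23/v31/v30 registers.
    static inline uint8_t requantize_per_layer(
      uint8_t pooled, int32_t input_offset, int32_t output_offset,
      int32_t per_layer_left_shift, int32_t per_layer_mul, int32_t per_layer_right_shift)
    {
      // saddw with the negated input offset: widen and subtract the input zero point.
      int32_t v = static_cast<int32_t>(pooled) - input_offset;

      // srshl by the per-layer left shift (written as a multiply to keep the sketch well defined).
      v = static_cast<int32_t>(static_cast<int64_t>(v) * (int64_t{1} << per_layer_left_shift));

      // sqrdmulh: rounding doubling multiply by the fixed-point multiplier, keeping the high half.
      v = static_cast<int32_t>((static_cast<int64_t>(v) * per_layer_mul + (int64_t{1} << 30)) >> 31);

      // srshl with a negative shift amount rounds and shifts right;
      // per_layer_right_shift is assumed to be stored as that negative amount.
      const int right_shift = -per_layer_right_shift;
      if (right_shift > 0)
      {
        v = static_cast<int32_t>((static_cast<int64_t>(v) + (int64_t{1} << (right_shift - 1))) >> right_shift);
      }

      // Add the output zero point and clamp to [0, 255] (the smax/smin/uzp1 tail).
      v += output_offset;
      return static_cast<uint8_t>(std::min(255, std::max(0, v)));
    }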
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp
index 6dffdcf..225f1e4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,18 +33,11 @@
 void cpp_nhwc_1x1_stride_any_depthfirst_impl(const uint64_t, const uint64_t, uint64_t n_channels, const T *const *const inptrs, T *outptr);
 
 template <typename T>
-struct cpp_nhwc_1x1_stride_any_depthfirst
+struct cpp_nhwc_1x1_stride_any_depthfirst : IGenericDepthfirstStrategy<T, T, Nothing>
 {
-  typedef T operand_type;
-  typedef T return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t, uint64_t n_channels, const operand_type *const *const inptrs, return_type *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-  kern_type kernel = cpp_nhwc_1x1_stride_any_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<T, T, Nothing>;
   cpp_nhwc_1x1_stride_any_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return cpp_nhwc_1x1_stride_any_depthfirst_impl<T>; }
 };
 
 }  // namespace pooling
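Every strategy header touched by this patch is reshaped the same way: the operand/return typedefs, the constexpr accessors and the kern_type member give way to a small subclass that only reports its kernel function. A minimal self-contained sketch of that shape follows; the stand-in interface and the example names are simplified for illustration and are not the library's real definitions.

    #include <cstdint>

    // Simplified stand-ins, for illustration only.
    struct Nothing {};
    struct CPUInfo;

    template <typename TInput, typename TOutput, typename OutputStage = Nothing>
    class IGenericDepthfirstStrategy
    {
      public:
      virtual ~IGenericDepthfirstStrategy() = default;
      using KernelType = void (*)(uint64_t window_cells, uint64_t n_valid_cells,
                                  uint64_t n_channels,
                                  const TInput *const *inptrs, TOutput *outptr);
      virtual KernelType get_kernel(void) const = 0;
    };

    // Hypothetical kernel implementation standing in for the real *_impl functions.
    template <typename T>
    void example_generic_depthfirst_impl(uint64_t, uint64_t, uint64_t n_channels,
                                         const T *const *inptrs, T *outptr)
    {
      for (uint64_t c = 0; c < n_channels; c++) outptr[c] = inptrs[0][c];  // stand-in body
    }

    // The boilerplate each generic-kernel header now reduces to.
    template <typename T>
    struct example_generic_depthfirst : IGenericDepthfirstStrategy<T, T, Nothing>
    {
      using Parent = IGenericDepthfirstStrategy<T, T, Nothing>;
      example_generic_depthfirst(const CPUInfo *) {}
      typename Parent::KernelType get_kernel(void) const override
      {
        return example_generic_depthfirst_impl<T>;
      }
    };

The depthfirst window kernels below follow the analogous pattern through DepthfirstStrategy, additionally forwarding the pool, stride and output geometry to the parent constructor instead of exposing them as constexpr accessors.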
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
index 0167d78..f829323 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst
+struct sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst : public DepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
+  using Parent = DepthfirstStrategy<__fp16, __fp16>;
 
-  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::AVERAGE;
+  const static auto pool_rows = 3u, pool_cols = 3u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+  sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 3; }
-  constexpr static unsigned int pool_cols(void) { return 3; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
-
-  sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index a1a530b..75e4ddc 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -83,121 +83,121 @@
 
   __asm__ __volatile__(
     "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x4, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x5, #0x0\n"
-    "ldr x6, [%x[args], %[offsetof_inptrs]]\n"
+    "mov x4, #0x0\n"
     "mov x19, #0x4\n"
-    "add x7, %x[args], %[offsetof_rescale]\n"
-    "ldp x8, x17, [x20, #0x0]\n"
-    "ldp x16, x15, [x20, #0x10]\n"
+    "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x6, x7, [x20, #0x0]\n"
     "whilelt p0.h, XZR, x19\n"
-    "ldp x14, x13, [x6, #0x0]\n"
     "whilelt p1.h, x4, x3\n"
-    "ldp x12, x11, [x6, #0x10]\n"
-    "ldp x10, x9, [x6, #0x20]\n"
-    "ldp x28, x27, [x6, #0x30]\n"
-    "ldp x26, x25, [x6, #0x40]\n"
-    "ldp x24, x23, [x6, #0x50]\n"
-    "ldp x22, x21, [x6, #0x60]\n"
-    "ldp x20, x19, [x6, #0x70]\n"
-    "ld1rqh { z7.h }, p0/Z, [x7]\n"
-    "ld1h { z8.h }, p1/Z, [x9, x4, LSL #1]\n"
+    "ldp x8, x17, [x20, #0x10]\n"
+    "ldp x16, x15, [x5, #0x0]\n"
+    "add x14, %x[args], %[offsetof_rescale]\n"
+    "mov x13, #0x0\n"
+    "ldp x12, x11, [x5, #0x10]\n"
+    "ldp x10, x9, [x5, #0x20]\n"
+    "ldp x28, x27, [x5, #0x30]\n"
+    "ldp x26, x25, [x5, #0x40]\n"
+    "ldp x24, x23, [x5, #0x50]\n"
+    "ldp x22, x21, [x5, #0x60]\n"
+    "ldp x20, x19, [x5, #0x70]\n"
+    "ld1h { z7.h }, p1/Z, [x9, x4, LSL #1]\n"
     "ld1h { z6.h }, p1/Z, [x28, x4, LSL #1]\n"
     "ld1h { z5.h }, p1/Z, [x25, x4, LSL #1]\n"
     "ld1h { z4.h }, p1/Z, [x24, x4, LSL #1]\n"
-    "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
+    "ld1h { z3.h }, p1/Z, [x15, x4, LSL #1]\n"
     "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
     "ld1h { z1.h }, p1/Z, [x10, x4, LSL #1]\n"
-    "ld1h { z0.h }, p1/Z, [x26, x4, LSL #1]\n"
-    "ld1h { z31.h }, p1/Z, [x27, x4, LSL #1]\n"
-    "ld1h { z30.h }, p1/Z, [x23, x4, LSL #1]\n"
-    "ld1h { z29.h }, p1/Z, [x21, x4, LSL #1]\n"
-    "ld1h { z28.h }, p1/Z, [x20, x4, LSL #1]\n"
-    "ld1h { z27.h }, p1/Z, [x14, x4, LSL #1]\n"
-    "ld1h { z26.h }, p1/Z, [x11, x4, LSL #1]\n"
-    "ld1h { z25.h }, p1/Z, [x22, x4, LSL #1]\n"
-    "ld1h { z24.h }, p1/Z, [x19, x4, LSL #1]\n"
+    "ld1h { z31.h }, p1/Z, [x26, x4, LSL #1]\n"
+    "ld1h { z30.h }, p1/Z, [x27, x4, LSL #1]\n"
+    "ld1h { z29.h }, p1/Z, [x23, x4, LSL #1]\n"
+    "ld1h { z28.h }, p1/Z, [x21, x4, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x20, x4, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x16, x4, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
     "incw x4\n"
     "whilelt p1.h, x4, x3\n"
+    "ld1rqh { z0.h }, p0/Z, [x14]\n"
     "b.none 2f\n"
     "1:"  // Vector: Loop
-    "fadd z17.h, z8.h, z6.h\n"
-    "ld1h { z8.h }, p1/Z, [x9, x4, LSL #1]\n"
-    "whilelt p0.h, x5, x3\n"
+    "fadd z17.h, z7.h, z6.h\n"
     "fadd z16.h, z5.h, z4.h\n"
+    "ld1h { z7.h }, p1/Z, [x9, x4, LSL #1]\n"
     "ld1h { z6.h }, p1/Z, [x28, x4, LSL #1]\n"
+    "fadd z19.h, z17.h, z16.h\n"
     "fadd z18.h, z3.h, z2.h\n"
     "ld1h { z5.h }, p1/Z, [x25, x4, LSL #1]\n"
-    "fadd z23.h, z1.h, z0.h\n"
     "ld1h { z4.h }, p1/Z, [x24, x4, LSL #1]\n"
-    "fadd z22.h, z31.h, z30.h\n"
-    "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
-    "fadd z17.h, z17.h, z16.h\n"
+    "fadd z17.h, z1.h, z31.h\n"
+    "fadd z22.h, z30.h, z29.h\n"
+    "ld1h { z3.h }, p1/Z, [x15, x4, LSL #1]\n"
     "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
-    "fadd z16.h, z29.h, z28.h\n"
+    "fadd z16.h, z28.h, z27.h\n"
+    "fadd z21.h, z18.h, z19.h\n"
     "ld1h { z1.h }, p1/Z, [x10, x4, LSL #1]\n"
-    "fadd z19.h, z27.h, z23.h\n"
-    "ld1h { z0.h }, p1/Z, [x26, x4, LSL #1]\n"
-    "fadd z21.h, z18.h, z17.h\n"
-    "ld1h { z31.h }, p1/Z, [x27, x4, LSL #1]\n"
-    "fadd z20.h, z16.h, z17.h\n"
-    "ld1h { z30.h }, p1/Z, [x23, x4, LSL #1]\n"
-    "fadd z18.h, z26.h, z22.h\n"
-    "ld1h { z29.h }, p1/Z, [x21, x4, LSL #1]\n"
-    "fadd z17.h, z25.h, z23.h\n"
-    "ld1h { z28.h }, p1/Z, [x20, x4, LSL #1]\n"
-    "fadd z16.h, z24.h, z22.h\n"
-    "ld1h { z27.h }, p1/Z, [x14, x4, LSL #1]\n"
-    "fadd z19.h, z21.h, z19.h\n"
-    "ld1h { z26.h }, p1/Z, [x11, x4, LSL #1]\n"
-    "fadd z18.h, z21.h, z18.h\n"
-    "ld1h { z25.h }, p1/Z, [x22, x4, LSL #1]\n"
-    "fadd z17.h, z17.h, z20.h\n"
-    "ld1h { z24.h }, p1/Z, [x19, x4, LSL #1]\n"
+    "ld1h { z31.h }, p1/Z, [x26, x4, LSL #1]\n"
+    "fadd z20.h, z16.h, z19.h\n"
+    "fadd z19.h, z26.h, z17.h\n"
+    "ld1h { z30.h }, p1/Z, [x27, x4, LSL #1]\n"
+    "ld1h { z29.h }, p1/Z, [x23, x4, LSL #1]\n"
+    "fadd z18.h, z25.h, z22.h\n"
+    "fadd z17.h, z24.h, z17.h\n"
+    "ld1h { z28.h }, p1/Z, [x21, x4, LSL #1]\n"
+    "ld1h { z27.h }, p1/Z, [x20, x4, LSL #1]\n"
+    "fadd z16.h, z23.h, z22.h\n"
+    "ld1h { z26.h }, p1/Z, [x16, x4, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+    "fadd z19.h, z19.h, z21.h\n"
+    "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+    "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
     "incw x4\n"
-    "fadd z16.h, z20.h, z16.h\n"
+    "fadd z18.h, z18.h, z21.h\n"
+    "fadd z17.h, z17.h, z20.h\n"
+    "fadd z16.h, z16.h, z20.h\n"
+    "whilelt p0.h, x13, x3\n"
     "whilelt p1.h, x4, x3\n"
-    "fmul z19.h, z19.h, z7.h[0]\n"
-    "st1h { z19.h }, p0, [x8, x5, LSL #1]\n"
-    "fmul z18.h, z18.h, z7.h[1]\n"
-    "fmul z17.h, z17.h, z7.h[2]\n"
-    "st1h { z18.h }, p0, [x17, x5, LSL #1]\n"
-    "fmul z16.h, z16.h, z7.h[3]\n"
-    "st1h { z17.h }, p0, [x16, x5, LSL #1]\n"
-    "st1h { z16.h }, p0, [x15, x5, LSL #1]\n"
-    "incw x5\n"
+    "fmul z19.h, z19.h, z0.h[0]\n"
+    "fmul z18.h, z18.h, z0.h[1]\n"
+    "st1h { z19.h }, p0, [x6, x13, LSL #1]\n"
+    "fmul z17.h, z17.h, z0.h[2]\n"
+    "fmul z16.h, z16.h, z0.h[3]\n"
+    "st1h { z18.h }, p0, [x7, x13, LSL #1]\n"
+    "st1h { z17.h }, p0, [x8, x13, LSL #1]\n"
+    "st1h { z16.h }, p0, [x17, x13, LSL #1]\n"
+    "incw x13\n"
     "b.any 1b\n"
     "2:"  // Vector: Tail
-    "fadd z17.h, z8.h, z6.h\n"
-    "whilelt p0.h, x5, x3\n"
+    "fadd z17.h, z7.h, z6.h\n"
     "fadd z16.h, z5.h, z4.h\n"
+    "whilelt p0.h, x13, x3\n"
+    "fadd z19.h, z17.h, z16.h\n"
     "fadd z18.h, z3.h, z2.h\n"
-    "fadd z23.h, z1.h, z0.h\n"
-    "fadd z17.h, z17.h, z16.h\n"
-    "fadd z22.h, z31.h, z30.h\n"
-    "fadd z16.h, z29.h, z28.h\n"
-    "fadd z21.h, z18.h, z17.h\n"
-    "fadd z19.h, z27.h, z23.h\n"
-    "fadd z20.h, z16.h, z17.h\n"
-    "fadd z18.h, z26.h, z22.h\n"
-    "fadd z17.h, z25.h, z23.h\n"
-    "fadd z16.h, z24.h, z22.h\n"
-    "fadd z19.h, z21.h, z19.h\n"
-    "fadd z18.h, z21.h, z18.h\n"
+    "fadd z17.h, z1.h, z31.h\n"
+    "fadd z22.h, z30.h, z29.h\n"
+    "fadd z16.h, z28.h, z27.h\n"
+    "fadd z21.h, z18.h, z19.h\n"
+    "fadd z20.h, z16.h, z19.h\n"
+    "fadd z19.h, z26.h, z17.h\n"
+    "fadd z18.h, z25.h, z22.h\n"
+    "fadd z17.h, z24.h, z17.h\n"
+    "fadd z16.h, z23.h, z22.h\n"
+    "fadd z19.h, z19.h, z21.h\n"
+    "fmul z19.h, z19.h, z0.h[0]\n"
+    "st1h { z19.h }, p0, [x6, x13, LSL #1]\n"
+    "fadd z18.h, z18.h, z21.h\n"
     "fadd z17.h, z17.h, z20.h\n"
-    "fadd z16.h, z20.h, z16.h\n"
-    "fmul z19.h, z19.h, z7.h[0]\n"
-    "st1h { z19.h }, p0, [x8, x5, LSL #1]\n"
-    "fmul z18.h, z18.h, z7.h[1]\n"
-    "fmul z17.h, z17.h, z7.h[2]\n"
-    "st1h { z18.h }, p0, [x17, x5, LSL #1]\n"
-    "fmul z16.h, z16.h, z7.h[3]\n"
-    "st1h { z17.h }, p0, [x16, x5, LSL #1]\n"
-    "st1h { z16.h }, p0, [x15, x5, LSL #1]\n"
+    "fmul z18.h, z18.h, z0.h[1]\n"
+    "fmul z17.h, z17.h, z0.h[2]\n"
+    "fadd z16.h, z16.h, z20.h\n"
+    "fmul z16.h, z16.h, z0.h[3]\n"
+    "st1h { z18.h }, p0, [x7, x13, LSL #1]\n"
+    "st1h { z17.h }, p0, [x8, x13, LSL #1]\n"
+    "st1h { z16.h }, p0, [x17, x13, LSL #1]\n"
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
-    : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp
index 02f2ce8..4923148 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void sve_fp16_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
 
-struct sve_fp16_nhwc_avg_generic_depthfirst
+struct sve_fp16_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = sve_fp16_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<__fp16, __fp16>;
   sve_fp16_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_fp16_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index 310df11..7081206 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS)
 
@@ -41,35 +42,35 @@
   const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
 
   __asm__ __volatile__(
-    "ptrue p0.b\n"
-    "ld1rh { z8.h }, p0/Z, [%x[rescale_ptr]]\n"
     "mov x28, #0x0\n"
     "cnth x27\n"
     "cnth x26, ALL, MUL #2\n"
     "cnth x25, ALL, MUL #3\n"
+    "ptrue p0.b\n"
     "whilelt p3.h, x28, %x[n_channels]\n"
+    "ld1rh { z7.h }, p0/Z, [%x[rescale_ptr]]\n"
     "whilelt p2.h, x27, %x[n_channels]\n"
     "whilelt p1.h, x26, %x[n_channels]\n"
     "whilelt p0.h, x25, %x[n_channels]\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z7.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z6.b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z6.b, #0x0\n"
     "mov z5.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z4.b, #0x0\n"
+    "mov z3.b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
-    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
-    "ld1h { z30.h }, p2/Z, [x22, x27, LSL #1]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
     "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
     "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
     "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
@@ -82,47 +83,47 @@
     "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "fadd z23.h, z3.h, z2.h\n"
+    "fadd z23.h, z2.h, z1.h\n"
+    "fadd z19.h, z0.h, z31.h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd z19.h, z1.h, z0.h\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "fadd z22.h, z31.h, z30.h\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "fadd z22.h, z30.h, z22.h\n"
     "fadd z18.h, z29.h, z28.h\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "fadd z21.h, z27.h, z21.h\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
     "fadd z17.h, z26.h, z17.h\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
     "fadd z20.h, z25.h, z20.h\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
     "fadd z16.h, z24.h, z16.h\n"
-    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
     "fadd z19.h, z23.h, z19.h\n"
-    "ld1h { z30.h }, p2/Z, [x22, x27, LSL #1]\n"
     "fadd z18.h, z22.h, z18.h\n"
-    "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
     "fadd z17.h, z21.h, z17.h\n"
-    "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
     "fadd z16.h, z20.h, z16.h\n"
+    "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "fadd z5.h, z5.h, z18.h\n"
     "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
-    "fadd z7.h, z7.h, z19.h\n"
     "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
-    "fadd z6.h, z6.h, z18.h\n"
+    "fadd z4.h, z4.h, z17.h\n"
+    "fadd z3.h, z3.h, z16.h\n"
     "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
-    "fadd z5.h, z5.h, z17.h\n"
     "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
-    "fadd z4.h, z4.h, z16.h\n"
     "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
     "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
     "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
     "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "fadd z23.h, z3.h, z2.h\n"
-    "fadd z19.h, z1.h, z0.h\n"
-    "fadd z22.h, z31.h, z30.h\n"
+    "fadd z23.h, z2.h, z1.h\n"
+    "fadd z19.h, z0.h, z31.h\n"
+    "fadd z22.h, z30.h, z22.h\n"
     "fadd z18.h, z29.h, z28.h\n"
     "fadd z21.h, z27.h, z21.h\n"
     "fadd z17.h, z26.h, z17.h\n"
@@ -132,96 +133,95 @@
     "fadd z18.h, z22.h, z18.h\n"
     "fadd z17.h, z21.h, z17.h\n"
     "fadd z16.h, z20.h, z16.h\n"
-    "fadd z7.h, z7.h, z19.h\n"
-    "fadd z6.h, z6.h, z18.h\n"
-    "fadd z5.h, z5.h, z17.h\n"
-    "fadd z4.h, z4.h, z16.h\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "fadd z5.h, z5.h, z18.h\n"
+    "fadd z4.h, z4.h, z17.h\n"
+    "fadd z3.h, z3.h, z16.h\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
     "subs x20, x20, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "fadd z7.h, z7.h, z3.h\n"
-    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
+    "fadd z6.h, z6.h, z2.h\n"
+    "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
     "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
-    "fadd z6.h, z6.h, z31.h\n"
+    "fadd z5.h, z5.h, z30.h\n"
+    "fadd z4.h, z4.h, z27.h\n"
     "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
-    "fadd z5.h, z5.h, z27.h\n"
-    "fadd z4.h, z4.h, z25.h\n"
+    "fadd z3.h, z3.h, z25.h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "fmul z7.h, z7.h, z8.h\n"
-    "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
-    "fmul z6.h, z6.h, z8.h\n"
+    "fmul z6.h, z6.h, z7.h\n"
+    "fmul z5.h, z5.h, z7.h\n"
+    "st1h { z6.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "fmul z4.h, z4.h, z7.h\n"
+    "fmul z3.h, z3.h, z7.h\n"
+    "st1h { z5.h }, p2, [%x[outptr], x27, LSL #1]\n"
+    "st1h { z4.h }, p1, [%x[outptr], x26, LSL #1]\n"
     "inch x28, ALL, MUL #4\n"
-    "fmul z5.h, z5.h, z8.h\n"
-    "st1h { z6.h }, p2, [%x[outptr], x27, LSL #1]\n"
-    "fmul z4.h, z4.h, z8.h\n"
     "inch x27, ALL, MUL #4\n"
-    "st1h { z5.h }, p1, [%x[outptr], x26, LSL #1]\n"
-    "inch x26, ALL, MUL #4\n"
-    "st1h { z4.h }, p0, [%x[outptr], x25, LSL #1]\n"
+    "st1h { z3.h }, p0, [%x[outptr], x25, LSL #1]\n"
     "inch x25, ALL, MUL #4\n"
     "whilelt p0.h, x25, %x[n_channels]\n"
+    "inch x26, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
     "whilelt p3.h, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z7.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z6.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "fadd z23.h, z3.h, z2.h\n"
+    "fadd z23.h, z2.h, z1.h\n"
+    "fadd z19.h, z0.h, z31.h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd z19.h, z1.h, z0.h\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "fadd z19.h, z23.h, z19.h\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "fadd z7.h, z7.h, z19.h\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z6.h, z6.h, z19.h\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "fadd z23.h, z3.h, z2.h\n"
-    "fadd z19.h, z1.h, z0.h\n"
+    "fadd z23.h, z2.h, z1.h\n"
+    "fadd z19.h, z0.h, z31.h\n"
     "fadd z19.h, z23.h, z19.h\n"
-    "fadd z7.h, z7.h, z19.h\n"
+    "fadd z6.h, z6.h, z19.h\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
     "subs x20, x20, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "fadd z7.h, z7.h, z3.h\n"
+    "fadd z6.h, z6.h, z2.h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "fmul z7.h, z7.h, z8.h\n"
-    "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "fmul z6.h, z6.h, z7.h\n"
+    "st1h { z6.h }, p3, [%x[outptr], x28, LSL #1]\n"
     "inch x28\n"
     "whilelt p3.h, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
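As a rough scalar picture of what this generic average-pooling kernel computes (the helper below is hypothetical and not part of the patch): each output channel is the sum over the valid input cells, scaled by the reciprocal of the pooling window size that the kernel broadcasts from rescale_ptr.

    #include <cstdint>

    // Hypothetical scalar reference for the generic average-pooling kernel.
    template <typename T>
    void avg_generic_depthfirst_reference(uint64_t window_cells, uint64_t n_valid_cells,
                                          uint64_t n_channels,
                                          const T *const *inptrs, T *outptr)
    {
      // Same reciprocal the kernel computes as rescale_value.
      const auto rescale_value = static_cast<T>(1.0f / static_cast<float>(window_cells));
      for (uint64_t c = 0; c < n_channels; c++)
      {
        T acc = static_cast<T>(0);
        for (uint64_t i = 0; i < n_valid_cells; i++)
        {
          acc += inptrs[i][c];  // the asm folds four cells per iteration, then a scalar tail
        }
        outptr[c] = acc * rescale_value;  // the final fmul by the broadcast reciprocal
      }
    }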
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 5e4327d..3691b6c 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst
+struct sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
+  using Parent = DepthfirstStrategy<__fp16, __fp16>;
 
-  typedef void (*kern_type)(unsigned int, const __fp16 *const *const, __fp16 *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 9abd0f5..cda3d42 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -64,76 +64,76 @@
 
   __asm__ __volatile__(
     "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
-    "ptrue p2.b\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
     "mov x13, #0x0\n"
+    "whilelt p2.h, x13, x14\n"
     "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
-    "mov x12, #0x0\n"
-    "ldp x11, x10, [x20, #0x0]\n"
-    "whilelt p1.h, x13, x14\n"
+    "ldp x12, x11, [x20, #0x0]\n"
+    "ptrue p1.b\n"
+    "mov x10, #0x0\n"
     "ldp x9, x28, [x20, #0x10]\n"
     "ldp x27, x26, [x19, #0x0]\n"
     "ldp x25, x24, [x19, #0x10]\n"
     "ldp x23, x22, [x19, #0x20]\n"
     "ldp x21, x20, [x19, #0x30]\n"
     "ldr x19, [x19, #0x40]\n"
-    "ld1h { z31.h }, p1/Z, [x26, x13, LSL #1]\n"
-    "ld1h { z30.h }, p1/Z, [x23, x13, LSL #1]\n"
-    "ld1h { z29.h }, p1/Z, [x20, x13, LSL #1]\n"
-    "ld1h { z28.h }, p1/Z, [x24, x13, LSL #1]\n"
-    "ld1h { z27.h }, p1/Z, [x27, x13, LSL #1]\n"
-    "ld1h { z26.h }, p1/Z, [x22, x13, LSL #1]\n"
-    "ld1h { z25.h }, p1/Z, [x25, x13, LSL #1]\n"
-    "ld1h { z24.h }, p1/Z, [x21, x13, LSL #1]\n"
-    "ld1h { z23.h }, p1/Z, [x19, x13, LSL #1]\n"
+    "ld1h { z31.h }, p2/Z, [x26, x13, LSL #1]\n"
+    "ld1h { z30.h }, p2/Z, [x23, x13, LSL #1]\n"
+    "ld1h { z29.h }, p2/Z, [x20, x13, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x24, x13, LSL #1]\n"
+    "ld1h { z27.h }, p2/Z, [x27, x13, LSL #1]\n"
+    "ld1h { z26.h }, p2/Z, [x22, x13, LSL #1]\n"
+    "ld1h { z25.h }, p2/Z, [x25, x13, LSL #1]\n"
+    "ld1h { z24.h }, p2/Z, [x21, x13, LSL #1]\n"
+    "ld1h { z23.h }, p2/Z, [x19, x13, LSL #1]\n"
     "incw x13\n"
-    "whilelt p1.h, x13, x14\n"
+    "whilelt p2.h, x13, x14\n"
     "b.none 2f\n"
     "1:"  // Vector: Loop
-    "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z30.h\n"
-    "ld1h { z31.h }, p1/Z, [x26, x13, LSL #1]\n"
-    "whilelt p0.h, x12, x14\n"
-    "movprfx z21, z30\n fmax z21.h, p2/M, z21.h, z29.h\n"
-    "ld1h { z30.h }, p1/Z, [x23, x13, LSL #1]\n"
-    "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z27.h\n"
-    "ld1h { z29.h }, p1/Z, [x20, x13, LSL #1]\n"
-    "movprfx z17, z26\n fmax z17.h, p2/M, z17.h, z25.h\n"
-    "ld1h { z27.h }, p1/Z, [x27, x13, LSL #1]\n"
-    "movprfx z16, z24\n fmax z16.h, p2/M, z16.h, z28.h\n"
-    "ld1h { z28.h }, p1/Z, [x24, x13, LSL #1]\n"
-    "movprfx z20, z26\n fmax z20.h, p2/M, z20.h, z23.h\n"
-    "ld1h { z26.h }, p1/Z, [x22, x13, LSL #1]\n"
-    "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
-    "ld1h { z25.h }, p1/Z, [x25, x13, LSL #1]\n"
-    "movprfx z18, z22\n fmax z18.h, p2/M, z18.h, z17.h\n"
-    "ld1h { z24.h }, p1/Z, [x21, x13, LSL #1]\n"
-    "movprfx z17, z21\n fmax z17.h, p2/M, z17.h, z16.h\n"
-    "ld1h { z23.h }, p1/Z, [x19, x13, LSL #1]\n"
+    "movprfx z22, z31\n fmax z22.h, p1/M, z22.h, z30.h\n"
+    "movprfx z21, z30\n fmax z21.h, p1/M, z21.h, z29.h\n"
+    "ld1h { z31.h }, p2/Z, [x26, x13, LSL #1]\n"
+    "ld1h { z30.h }, p2/Z, [x23, x13, LSL #1]\n"
+    "movprfx z20, z28\n fmax z20.h, p1/M, z20.h, z27.h\n"
+    "movprfx z17, z26\n fmax z17.h, p1/M, z17.h, z25.h\n"
+    "ld1h { z29.h }, p2/Z, [x20, x13, LSL #1]\n"
+    "ld1h { z27.h }, p2/Z, [x27, x13, LSL #1]\n"
+    "movprfx z19, z24\n fmax z19.h, p1/M, z19.h, z28.h\n"
+    "movprfx z18, z26\n fmax z18.h, p1/M, z18.h, z23.h\n"
+    "ld1h { z28.h }, p2/Z, [x24, x13, LSL #1]\n"
+    "ld1h { z26.h }, p2/Z, [x22, x13, LSL #1]\n"
+    "ld1h { z25.h }, p2/Z, [x25, x13, LSL #1]\n"
+    "ld1h { z24.h }, p2/Z, [x21, x13, LSL #1]\n"
+    "whilelt p0.h, x10, x14\n"
+    "movprfx z16, z22\n fmax z16.h, p1/M, z16.h, z20.h\n"
+    "ld1h { z23.h }, p2/Z, [x19, x13, LSL #1]\n"
     "incw x13\n"
-    "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
-    "st1h { z19.h }, p0, [x11, x12, LSL #1]\n"
-    "whilelt p1.h, x13, x14\n"
-    "st1h { z18.h }, p0, [x10, x12, LSL #1]\n"
-    "st1h { z17.h }, p0, [x9, x12, LSL #1]\n"
-    "st1h { z16.h }, p0, [x28, x12, LSL #1]\n"
-    "incw x12\n"
+    "whilelt p2.h, x13, x14\n"
+    "st1h { z16.h }, p0, [x12, x10, LSL #1]\n"
+    "movprfx z16, z17\n fmax z16.h, p1/M, z16.h, z22.h\n"
+    "movprfx z17, z21\n fmax z17.h, p1/M, z17.h, z19.h\n"
+    "st1h { z16.h }, p0, [x11, x10, LSL #1]\n"
+    "movprfx z16, z21\n fmax z16.h, p1/M, z16.h, z18.h\n"
+    "st1h { z17.h }, p0, [x9, x10, LSL #1]\n"
+    "st1h { z16.h }, p0, [x28, x10, LSL #1]\n"
+    "incw x10\n"
     "b.any 1b\n"
     "2:"  // Vector: Tail
-    "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z30.h\n"
-    "whilelt p0.h, x12, x14\n"
-    "movprfx z21, z30\n fmax z21.h, p2/M, z21.h, z29.h\n"
-    "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z27.h\n"
-    "movprfx z17, z26\n fmax z17.h, p2/M, z17.h, z25.h\n"
-    "movprfx z16, z24\n fmax z16.h, p2/M, z16.h, z28.h\n"
-    "movprfx z20, z26\n fmax z20.h, p2/M, z20.h, z23.h\n"
-    "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
-    "st1h { z19.h }, p0, [x11, x12, LSL #1]\n"
-    "movprfx z18, z22\n fmax z18.h, p2/M, z18.h, z17.h\n"
-    "movprfx z17, z21\n fmax z17.h, p2/M, z17.h, z16.h\n"
-    "st1h { z18.h }, p0, [x10, x12, LSL #1]\n"
-    "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
-    "st1h { z17.h }, p0, [x9, x12, LSL #1]\n"
-    "st1h { z16.h }, p0, [x28, x12, LSL #1]\n"
+    "movprfx z22, z31\n fmax z22.h, p1/M, z22.h, z30.h\n"
+    "movprfx z21, z30\n fmax z21.h, p1/M, z21.h, z29.h\n"
+    "movprfx z20, z28\n fmax z20.h, p1/M, z20.h, z27.h\n"
+    "movprfx z17, z26\n fmax z17.h, p1/M, z17.h, z25.h\n"
+    "movprfx z19, z24\n fmax z19.h, p1/M, z19.h, z28.h\n"
+    "movprfx z18, z26\n fmax z18.h, p1/M, z18.h, z23.h\n"
+    "whilelt p0.h, x10, x14\n"
+    "movprfx z16, z22\n fmax z16.h, p1/M, z16.h, z20.h\n"
+    "st1h { z16.h }, p0, [x12, x10, LSL #1]\n"
+    "movprfx z16, z17\n fmax z16.h, p1/M, z16.h, z22.h\n"
+    "movprfx z17, z21\n fmax z17.h, p1/M, z17.h, z19.h\n"
+    "st1h { z16.h }, p0, [x11, x10, LSL #1]\n"
+    "movprfx z16, z21\n fmax z16.h, p1/M, z16.h, z18.h\n"
+    "st1h { z17.h }, p0, [x9, x10, LSL #1]\n"
+    "st1h { z16.h }, p0, [x28, x10, LSL #1]\n"
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp
index 44cdea3..0ef0a79 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void sve_fp16_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
 
-struct sve_fp16_nhwc_max_generic_depthfirst
+struct sve_fp16_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<__fp16, __fp16>
 {
-  typedef __fp16 operand_type;
-  typedef __fp16 return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const __fp16 *const *const inptrs, __fp16 *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = sve_fp16_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<__fp16, __fp16>;
   sve_fp16_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_fp16_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
index fae1f01..3b07bef 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS)
 
@@ -39,181 +40,180 @@
 )
 {
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x28, #0x0\n"
     "cnth x27\n"
     "cnth x26, ALL, MUL #2\n"
     "cnth x25, ALL, MUL #3\n"
-    "whilelt p3.h, x28, %x[n_channels]\n"
-    "whilelt p2.h, x27, %x[n_channels]\n"
-    "whilelt p1.h, x26, %x[n_channels]\n"
-    "whilelt p0.h, x25, %x[n_channels]\n"
+    "whilelt p4.h, x28, %x[n_channels]\n"
+    "whilelt p3.h, x27, %x[n_channels]\n"
+    "whilelt p2.h, x26, %x[n_channels]\n"
+    "whilelt p1.h, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.h, #0xfc00\n"
     "mov z7.h, #0xfc00\n"
     "mov x19, %x[inptrs]\n"
     "mov z6.h, #0xfc00\n"
-    "lsr x24, %x[n_valid_cells], #0x2\n"
     "mov z5.h, #0xfc00\n"
-    "mov z4.h, #0xfc00\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
-    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
-    "ld1h { z30.h }, p2/Z, [x22, x27, LSL #1]\n"
-    "ld1h { z22.h }, p2/Z, [x21, x27, LSL #1]\n"
-    "ld1h { z29.h }, p2/Z, [x20, x27, LSL #1]\n"
-    "ld1h { z28.h }, p1/Z, [x23, x26, LSL #1]\n"
-    "ld1h { z27.h }, p1/Z, [x22, x26, LSL #1]\n"
-    "ld1h { z21.h }, p1/Z, [x21, x26, LSL #1]\n"
-    "ld1h { z26.h }, p1/Z, [x20, x26, LSL #1]\n"
-    "ld1h { z16.h }, p0/Z, [x23, x25, LSL #1]\n"
-    "ld1h { z25.h }, p0/Z, [x22, x25, LSL #1]\n"
-    "ld1h { z20.h }, p0/Z, [x21, x25, LSL #1]\n"
-    "ld1h { z24.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z0.h }, p3/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x22, x27, LSL #1]\n"
+    "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z30.h }, p3/Z, [x20, x27, LSL #1]\n"
+    "ld1h { z29.h }, p2/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x22, x26, LSL #1]\n"
+    "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z27.h }, p2/Z, [x20, x26, LSL #1]\n"
+    "ld1h { z26.h }, p1/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+    "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
+    "movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
+    "movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z0\n fmax z18.h, p0/M, z18.h, z31.h\n"
+    "fmax z22.h, p0/M, z22.h, z30.h\n"
+    "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "movprfx z17, z29\n fmax z17.h, p0/M, z17.h, z28.h\n"
+    "fmax z21.h, p0/M, z21.h, z27.h\n"
+    "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
+    "movprfx z16, z26\n fmax z16.h, p0/M, z16.h, z25.h\n"
+    "fmax z20.h, p0/M, z20.h, z24.h\n"
+    "ld1h { z0.h }, p3/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z31.h }, p3/Z, [x22, x27, LSL #1]\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "fmax z18.h, p0/M, z18.h, z22.h\n"
+    "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+    "ld1h { z30.h }, p3/Z, [x20, x27, LSL #1]\n"
+    "fmax z17.h, p0/M, z17.h, z21.h\n"
+    "fmax z16.h, p0/M, z16.h, z20.h\n"
+    "ld1h { z29.h }, p2/Z, [x23, x26, LSL #1]\n"
+    "ld1h { z28.h }, p2/Z, [x22, x26, LSL #1]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax z8.h, p0/M, z8.h, z19.h\n"
+    "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+    "ld1h { z27.h }, p2/Z, [x20, x26, LSL #1]\n"
+    "fmax z7.h, p0/M, z7.h, z18.h\n"
+    "fmax z6.h, p0/M, z6.h, z17.h\n"
+    "ld1h { z26.h }, p1/Z, [x23, x25, LSL #1]\n"
+    "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+    "fmax z5.h, p0/M, z5.h, z16.h\n"
     "add x19, x19, #0x20\n"
-    "movprfx z18, z31\n fmax z18.h, p4/M, z18.h, z30.h\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "fmax z22.h, p4/M, z22.h, z29.h\n"
-    "movprfx z17, z28\n fmax z17.h, p4/M, z17.h, z27.h\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "fmax z21.h, p4/M, z21.h, z26.h\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "fmax z16.h, p4/M, z16.h, z25.h\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
-    "fmax z20.h, p4/M, z20.h, z24.h\n"
-    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
-    "fmax z19.h, p4/M, z19.h, z23.h\n"
-    "ld1h { z30.h }, p2/Z, [x22, x27, LSL #1]\n"
-    "fmax z18.h, p4/M, z18.h, z22.h\n"
-    "ld1h { z22.h }, p2/Z, [x21, x27, LSL #1]\n"
-    "fmax z17.h, p4/M, z17.h, z21.h\n"
-    "ld1h { z29.h }, p2/Z, [x20, x27, LSL #1]\n"
-    "fmax z16.h, p4/M, z16.h, z20.h\n"
-    "ld1h { z28.h }, p1/Z, [x23, x26, LSL #1]\n"
-    "fmax z7.h, p4/M, z7.h, z19.h\n"
-    "ld1h { z27.h }, p1/Z, [x22, x26, LSL #1]\n"
-    "fmax z6.h, p4/M, z6.h, z18.h\n"
-    "ld1h { z21.h }, p1/Z, [x21, x26, LSL #1]\n"
-    "fmax z5.h, p4/M, z5.h, z17.h\n"
-    "ld1h { z26.h }, p1/Z, [x20, x26, LSL #1]\n"
-    "fmax z4.h, p4/M, z4.h, z16.h\n"
-    "ld1h { z16.h }, p0/Z, [x23, x25, LSL #1]\n"
-    "ld1h { z25.h }, p0/Z, [x22, x25, LSL #1]\n"
-    "ld1h { z20.h }, p0/Z, [x21, x25, LSL #1]\n"
-    "ld1h { z24.h }, p0/Z, [x20, x25, LSL #1]\n"
+    "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+    "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
-    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
-    "movprfx z18, z31\n fmax z18.h, p4/M, z18.h, z30.h\n"
-    "fmax z22.h, p4/M, z22.h, z29.h\n"
-    "movprfx z17, z28\n fmax z17.h, p4/M, z17.h, z27.h\n"
-    "fmax z21.h, p4/M, z21.h, z26.h\n"
-    "fmax z16.h, p4/M, z16.h, z25.h\n"
-    "fmax z20.h, p4/M, z20.h, z24.h\n"
-    "fmax z19.h, p4/M, z19.h, z23.h\n"
-    "fmax z18.h, p4/M, z18.h, z22.h\n"
-    "fmax z17.h, p4/M, z17.h, z21.h\n"
-    "fmax z16.h, p4/M, z16.h, z20.h\n"
-    "fmax z7.h, p4/M, z7.h, z19.h\n"
-    "fmax z6.h, p4/M, z6.h, z18.h\n"
-    "fmax z5.h, p4/M, z5.h, z17.h\n"
-    "fmax z4.h, p4/M, z4.h, z16.h\n"
+    "movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
+    "movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
+    "movprfx z18, z0\n fmax z18.h, p0/M, z18.h, z31.h\n"
+    "fmax z22.h, p0/M, z22.h, z30.h\n"
+    "movprfx z17, z29\n fmax z17.h, p0/M, z17.h, z28.h\n"
+    "fmax z21.h, p0/M, z21.h, z27.h\n"
+    "movprfx z16, z26\n fmax z16.h, p0/M, z16.h, z25.h\n"
+    "fmax z20.h, p0/M, z20.h, z24.h\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "fmax z18.h, p0/M, z18.h, z22.h\n"
+    "fmax z17.h, p0/M, z17.h, z21.h\n"
+    "fmax z16.h, p0/M, z16.h, z20.h\n"
+    "fmax z8.h, p0/M, z8.h, z19.h\n"
+    "fmax z7.h, p0/M, z7.h, z18.h\n"
+    "fmax z6.h, p0/M, z6.h, z17.h\n"
+    "fmax z5.h, p0/M, z5.h, z16.h\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
     "subs x20, x20, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "fmax z7.h, p4/M, z7.h, z3.h\n"
-    "ld1h { z31.h }, p2/Z, [x23, x27, LSL #1]\n"
-    "ld1h { z28.h }, p1/Z, [x23, x26, LSL #1]\n"
-    "fmax z6.h, p4/M, z6.h, z31.h\n"
-    "ld1h { z16.h }, p0/Z, [x23, x25, LSL #1]\n"
-    "fmax z5.h, p4/M, z5.h, z28.h\n"
-    "fmax z4.h, p4/M, z4.h, z16.h\n"
+    "fmax z8.h, p0/M, z8.h, z4.h\n"
+    "ld1h { z0.h }, p3/Z, [x23, x27, LSL #1]\n"
+    "ld1h { z29.h }, p2/Z, [x23, x26, LSL #1]\n"
+    "fmax z7.h, p0/M, z7.h, z0.h\n"
+    "fmax z6.h, p0/M, z6.h, z29.h\n"
+    "ld1h { z26.h }, p1/Z, [x23, x25, LSL #1]\n"
+    "fmax z5.h, p0/M, z5.h, z26.h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "st1h { z8.h }, p4, [%x[outptr], x28, LSL #1]\n"
     "inch x28, ALL, MUL #4\n"
-    "st1h { z6.h }, p2, [%x[outptr], x27, LSL #1]\n"
+    "st1h { z7.h }, p3, [%x[outptr], x27, LSL #1]\n"
     "inch x27, ALL, MUL #4\n"
-    "st1h { z5.h }, p1, [%x[outptr], x26, LSL #1]\n"
+    "st1h { z6.h }, p2, [%x[outptr], x26, LSL #1]\n"
     "inch x26, ALL, MUL #4\n"
-    "st1h { z4.h }, p0, [%x[outptr], x25, LSL #1]\n"
+    "st1h { z5.h }, p1, [%x[outptr], x25, LSL #1]\n"
     "inch x25, ALL, MUL #4\n"
-    "whilelt p0.h, x25, %x[n_channels]\n"
+    "whilelt p1.h, x25, %x[n_channels]\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.h, x28, %x[n_channels]\n"
+    "whilelt p4.h, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z7.h, #0xfc00\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.h, #0xfc00\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "add x19, x19, #0x20\n"
+    "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
+    "movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
+    "movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "subs x24, x24, #0x1\n"
+    "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+    "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
+    "fmax z8.h, p0/M, z8.h, z19.h\n"
     "add x19, x19, #0x20\n"
-    "fmax z19.h, p4/M, z19.h, z23.h\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "ld1h { z2.h }, p3/Z, [x22, x28, LSL #1]\n"
-    "fmax z7.h, p4/M, z7.h, z19.h\n"
-    "ld1h { z1.h }, p3/Z, [x21, x28, LSL #1]\n"
-    "ld1h { z0.h }, p3/Z, [x20, x28, LSL #1]\n"
+    "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+    "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "movprfx z19, z3\n fmax z19.h, p4/M, z19.h, z2.h\n"
-    "movprfx z23, z1\n fmax z23.h, p4/M, z23.h, z0.h\n"
-    "fmax z19.h, p4/M, z19.h, z23.h\n"
-    "fmax z7.h, p4/M, z7.h, z19.h\n"
+    "movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
+    "movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
+    "fmax z19.h, p0/M, z19.h, z23.h\n"
+    "fmax z8.h, p0/M, z8.h, z19.h\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
     "subs x20, x20, #0x1\n"
-    "ld1h { z3.h }, p3/Z, [x23, x28, LSL #1]\n"
-    "fmax z7.h, p4/M, z7.h, z3.h\n"
+    "fmax z8.h, p0/M, z8.h, z4.h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+    "st1h { z8.h }, p4, [%x[outptr], x28, LSL #1]\n"
     "inch x28\n"
-    "whilelt p3.h, x28, %x[n_channels]\n"
+    "whilelt p4.h, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
index 55d2a47..d5578d6 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst
+struct sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst : public DepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
+  using Parent = DepthfirstStrategy<float, float>;
 
-  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::AVERAGE;
+  const static auto pool_rows = 3u, pool_cols = 3u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
+  sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 3; }
-  constexpr static unsigned int pool_cols(void) { return 3; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl;
-
-  sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
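
Aside from the copyright bump, the header change above swaps the old free-standing strategy (constexpr geometry accessors plus a public kernel pointer) for a class deriving from DepthfirstStrategy<float, float>. The parent class itself is defined elsewhere in the library and is not part of this hunk, so the following is only a minimal, self-contained mock of that shape; MockParent, mock_kernel_impl and the main() driver are inventions of this sketch, not library API.

// Illustrative sketch only: a simplified stand-in for the parent class assumed
// by the refactored strategy structs above. Names below are this sketch's own.
#include <cstdio>

namespace sketch {

// Kernel signature matching the *_impl declarations in the headers above.
using KernelType = void (*)(unsigned int, const float *const *const, float *const *const,
                            bool, unsigned int, unsigned int, unsigned int, unsigned int);

class MockParent
{
  unsigned int m_pool_rows, m_pool_cols, m_stride_rows, m_stride_cols, m_out_rows, m_out_cols;

public:
  // Geometry is now passed to the parent constructor instead of being exposed
  // through per-kernel constexpr functions.
  MockParent(unsigned int pool_rows, unsigned int pool_cols,
             unsigned int stride_rows, unsigned int stride_cols,
             unsigned int out_rows, unsigned int out_cols)
  : m_pool_rows(pool_rows), m_pool_cols(pool_cols),
    m_stride_rows(stride_rows), m_stride_cols(stride_cols),
    m_out_rows(out_rows), m_out_cols(out_cols)
  {
  }

  virtual ~MockParent() = default;
  virtual KernelType get_kernel() const = 0;

  unsigned int out_rows() const { return m_out_rows; }
  unsigned int out_cols() const { return m_out_cols; }
};

// Placeholder for a real *_impl kernel; deliberately does nothing.
void mock_kernel_impl(unsigned int, const float *const *const, float *const *const,
                      bool, unsigned int, unsigned int, unsigned int, unsigned int) {}

// Shape of the refactored strategies: geometry goes to the parent constructor,
// the kernel pointer comes back from get_kernel().
struct mock_avg_3x3_s1_output2x2 : public MockParent
{
  mock_avg_3x3_s1_output2x2() : MockParent(3, 3, 1, 1, 2, 2) {}
  KernelType get_kernel() const override { return mock_kernel_impl; }
};

}  // namespace sketch

int main()
{
  sketch::mock_avg_3x3_s1_output2x2 strat;
  sketch::KernelType k = strat.get_kernel();
  k(0, nullptr, nullptr, false, 0, 0, 0, 0);  // no-op with the mock kernel
  std::printf("output tile: %ux%u\n", strat.out_rows(), strat.out_cols());
  return 0;
}
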
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 6cad63e..cd765b3 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -83,121 +83,121 @@
 
   __asm__ __volatile__(
     "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
-    "mov x4, #0x0\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
-    "mov x5, #0x0\n"
-    "ldr x6, [%x[args], %[offsetof_inptrs]]\n"
+    "mov x4, #0x0\n"
     "mov x19, #0x4\n"
-    "add x7, %x[args], %[offsetof_rescale]\n"
-    "ldp x8, x17, [x20, #0x0]\n"
-    "ldp x16, x15, [x20, #0x10]\n"
+    "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+    "ldp x6, x7, [x20, #0x0]\n"
     "whilelt p0.s, XZR, x19\n"
-    "ldp x14, x13, [x6, #0x0]\n"
     "whilelt p1.s, x4, x3\n"
-    "ldp x12, x11, [x6, #0x10]\n"
-    "ldp x10, x9, [x6, #0x20]\n"
-    "ldp x28, x27, [x6, #0x30]\n"
-    "ldp x26, x25, [x6, #0x40]\n"
-    "ldp x24, x23, [x6, #0x50]\n"
-    "ldp x22, x21, [x6, #0x60]\n"
-    "ldp x20, x19, [x6, #0x70]\n"
-    "ld1rqw { z7.s }, p0/Z, [x7]\n"
-    "ld1w { z8.s }, p1/Z, [x9, x4, LSL #2]\n"
+    "ldp x8, x17, [x20, #0x10]\n"
+    "ldp x16, x15, [x5, #0x0]\n"
+    "add x14, %x[args], %[offsetof_rescale]\n"
+    "mov x13, #0x0\n"
+    "ldp x12, x11, [x5, #0x10]\n"
+    "ldp x10, x9, [x5, #0x20]\n"
+    "ldp x28, x27, [x5, #0x30]\n"
+    "ldp x26, x25, [x5, #0x40]\n"
+    "ldp x24, x23, [x5, #0x50]\n"
+    "ldp x22, x21, [x5, #0x60]\n"
+    "ldp x20, x19, [x5, #0x70]\n"
+    "ld1w { z7.s }, p1/Z, [x9, x4, LSL #2]\n"
     "ld1w { z6.s }, p1/Z, [x28, x4, LSL #2]\n"
     "ld1w { z5.s }, p1/Z, [x25, x4, LSL #2]\n"
     "ld1w { z4.s }, p1/Z, [x24, x4, LSL #2]\n"
-    "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
+    "ld1w { z3.s }, p1/Z, [x15, x4, LSL #2]\n"
     "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
     "ld1w { z1.s }, p1/Z, [x10, x4, LSL #2]\n"
-    "ld1w { z0.s }, p1/Z, [x26, x4, LSL #2]\n"
-    "ld1w { z31.s }, p1/Z, [x27, x4, LSL #2]\n"
-    "ld1w { z30.s }, p1/Z, [x23, x4, LSL #2]\n"
-    "ld1w { z29.s }, p1/Z, [x21, x4, LSL #2]\n"
-    "ld1w { z28.s }, p1/Z, [x20, x4, LSL #2]\n"
-    "ld1w { z27.s }, p1/Z, [x14, x4, LSL #2]\n"
-    "ld1w { z26.s }, p1/Z, [x11, x4, LSL #2]\n"
-    "ld1w { z25.s }, p1/Z, [x22, x4, LSL #2]\n"
-    "ld1w { z24.s }, p1/Z, [x19, x4, LSL #2]\n"
+    "ld1w { z31.s }, p1/Z, [x26, x4, LSL #2]\n"
+    "ld1w { z30.s }, p1/Z, [x27, x4, LSL #2]\n"
+    "ld1w { z29.s }, p1/Z, [x23, x4, LSL #2]\n"
+    "ld1w { z28.s }, p1/Z, [x21, x4, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x20, x4, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x16, x4, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
     "incw x4\n"
     "whilelt p1.s, x4, x3\n"
+    "ld1rqw { z0.s }, p0/Z, [x14]\n"
     "b.none 2f\n"
     "1:"  // Vector: Loop
-    "fadd z17.s, z8.s, z6.s\n"
-    "ld1w { z8.s }, p1/Z, [x9, x4, LSL #2]\n"
-    "whilelt p0.s, x5, x3\n"
+    "fadd z17.s, z7.s, z6.s\n"
     "fadd z16.s, z5.s, z4.s\n"
+    "ld1w { z7.s }, p1/Z, [x9, x4, LSL #2]\n"
     "ld1w { z6.s }, p1/Z, [x28, x4, LSL #2]\n"
+    "fadd z19.s, z17.s, z16.s\n"
     "fadd z18.s, z3.s, z2.s\n"
     "ld1w { z5.s }, p1/Z, [x25, x4, LSL #2]\n"
-    "fadd z23.s, z1.s, z0.s\n"
     "ld1w { z4.s }, p1/Z, [x24, x4, LSL #2]\n"
-    "fadd z22.s, z31.s, z30.s\n"
-    "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
-    "fadd z17.s, z17.s, z16.s\n"
+    "fadd z17.s, z1.s, z31.s\n"
+    "fadd z22.s, z30.s, z29.s\n"
+    "ld1w { z3.s }, p1/Z, [x15, x4, LSL #2]\n"
     "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
-    "fadd z16.s, z29.s, z28.s\n"
+    "fadd z16.s, z28.s, z27.s\n"
+    "fadd z21.s, z18.s, z19.s\n"
     "ld1w { z1.s }, p1/Z, [x10, x4, LSL #2]\n"
-    "fadd z19.s, z27.s, z23.s\n"
-    "ld1w { z0.s }, p1/Z, [x26, x4, LSL #2]\n"
-    "fadd z21.s, z18.s, z17.s\n"
-    "ld1w { z31.s }, p1/Z, [x27, x4, LSL #2]\n"
-    "fadd z20.s, z16.s, z17.s\n"
-    "ld1w { z30.s }, p1/Z, [x23, x4, LSL #2]\n"
-    "fadd z18.s, z26.s, z22.s\n"
-    "ld1w { z29.s }, p1/Z, [x21, x4, LSL #2]\n"
-    "fadd z17.s, z25.s, z23.s\n"
-    "ld1w { z28.s }, p1/Z, [x20, x4, LSL #2]\n"
-    "fadd z16.s, z24.s, z22.s\n"
-    "ld1w { z27.s }, p1/Z, [x14, x4, LSL #2]\n"
-    "fadd z19.s, z21.s, z19.s\n"
-    "ld1w { z26.s }, p1/Z, [x11, x4, LSL #2]\n"
-    "fadd z18.s, z21.s, z18.s\n"
-    "ld1w { z25.s }, p1/Z, [x22, x4, LSL #2]\n"
-    "fadd z17.s, z17.s, z20.s\n"
-    "ld1w { z24.s }, p1/Z, [x19, x4, LSL #2]\n"
+    "ld1w { z31.s }, p1/Z, [x26, x4, LSL #2]\n"
+    "fadd z20.s, z16.s, z19.s\n"
+    "fadd z19.s, z26.s, z17.s\n"
+    "ld1w { z30.s }, p1/Z, [x27, x4, LSL #2]\n"
+    "ld1w { z29.s }, p1/Z, [x23, x4, LSL #2]\n"
+    "fadd z18.s, z25.s, z22.s\n"
+    "fadd z17.s, z24.s, z17.s\n"
+    "ld1w { z28.s }, p1/Z, [x21, x4, LSL #2]\n"
+    "ld1w { z27.s }, p1/Z, [x20, x4, LSL #2]\n"
+    "fadd z16.s, z23.s, z22.s\n"
+    "ld1w { z26.s }, p1/Z, [x16, x4, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+    "fadd z19.s, z19.s, z21.s\n"
+    "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+    "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
     "incw x4\n"
-    "fadd z16.s, z20.s, z16.s\n"
+    "fadd z18.s, z18.s, z21.s\n"
+    "fadd z17.s, z17.s, z20.s\n"
+    "fadd z16.s, z16.s, z20.s\n"
+    "whilelt p0.s, x13, x3\n"
     "whilelt p1.s, x4, x3\n"
-    "fmul z19.s, z19.s, z7.s[0]\n"
-    "st1w { z19.s }, p0, [x8, x5, LSL #2]\n"
-    "fmul z18.s, z18.s, z7.s[1]\n"
-    "fmul z17.s, z17.s, z7.s[2]\n"
-    "st1w { z18.s }, p0, [x17, x5, LSL #2]\n"
-    "fmul z16.s, z16.s, z7.s[3]\n"
-    "st1w { z17.s }, p0, [x16, x5, LSL #2]\n"
-    "st1w { z16.s }, p0, [x15, x5, LSL #2]\n"
-    "incw x5\n"
+    "fmul z19.s, z19.s, z0.s[0]\n"
+    "fmul z18.s, z18.s, z0.s[1]\n"
+    "st1w { z19.s }, p0, [x6, x13, LSL #2]\n"
+    "fmul z17.s, z17.s, z0.s[2]\n"
+    "fmul z16.s, z16.s, z0.s[3]\n"
+    "st1w { z18.s }, p0, [x7, x13, LSL #2]\n"
+    "st1w { z17.s }, p0, [x8, x13, LSL #2]\n"
+    "st1w { z16.s }, p0, [x17, x13, LSL #2]\n"
+    "incw x13\n"
     "b.any 1b\n"
     "2:"  // Vector: Tail
-    "fadd z17.s, z8.s, z6.s\n"
-    "whilelt p0.s, x5, x3\n"
+    "fadd z17.s, z7.s, z6.s\n"
     "fadd z16.s, z5.s, z4.s\n"
+    "whilelt p0.s, x13, x3\n"
+    "fadd z19.s, z17.s, z16.s\n"
     "fadd z18.s, z3.s, z2.s\n"
-    "fadd z23.s, z1.s, z0.s\n"
-    "fadd z17.s, z17.s, z16.s\n"
-    "fadd z22.s, z31.s, z30.s\n"
-    "fadd z16.s, z29.s, z28.s\n"
-    "fadd z21.s, z18.s, z17.s\n"
-    "fadd z19.s, z27.s, z23.s\n"
-    "fadd z20.s, z16.s, z17.s\n"
-    "fadd z18.s, z26.s, z22.s\n"
-    "fadd z17.s, z25.s, z23.s\n"
-    "fadd z16.s, z24.s, z22.s\n"
-    "fadd z19.s, z21.s, z19.s\n"
-    "fadd z18.s, z21.s, z18.s\n"
+    "fadd z17.s, z1.s, z31.s\n"
+    "fadd z22.s, z30.s, z29.s\n"
+    "fadd z16.s, z28.s, z27.s\n"
+    "fadd z21.s, z18.s, z19.s\n"
+    "fadd z20.s, z16.s, z19.s\n"
+    "fadd z19.s, z26.s, z17.s\n"
+    "fadd z18.s, z25.s, z22.s\n"
+    "fadd z17.s, z24.s, z17.s\n"
+    "fadd z16.s, z23.s, z22.s\n"
+    "fadd z19.s, z19.s, z21.s\n"
+    "fmul z19.s, z19.s, z0.s[0]\n"
+    "st1w { z19.s }, p0, [x6, x13, LSL #2]\n"
+    "fadd z18.s, z18.s, z21.s\n"
     "fadd z17.s, z17.s, z20.s\n"
-    "fadd z16.s, z20.s, z16.s\n"
-    "fmul z19.s, z19.s, z7.s[0]\n"
-    "st1w { z19.s }, p0, [x8, x5, LSL #2]\n"
-    "fmul z18.s, z18.s, z7.s[1]\n"
-    "fmul z17.s, z17.s, z7.s[2]\n"
-    "st1w { z18.s }, p0, [x17, x5, LSL #2]\n"
-    "fmul z16.s, z16.s, z7.s[3]\n"
-    "st1w { z17.s }, p0, [x16, x5, LSL #2]\n"
-    "st1w { z16.s }, p0, [x15, x5, LSL #2]\n"
+    "fmul z18.s, z18.s, z0.s[1]\n"
+    "fmul z17.s, z17.s, z0.s[2]\n"
+    "fadd z16.s, z16.s, z20.s\n"
+    "fmul z16.s, z16.s, z0.s[3]\n"
+    "st1w { z18.s }, p0, [x7, x13, LSL #2]\n"
+    "st1w { z17.s }, p0, [x8, x13, LSL #2]\n"
+    "st1w { z16.s }, p0, [x17, x13, LSL #2]\n"
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
-    : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp
index 0fcdcb2..a9e6b03 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void sve_fp32_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
 
-struct sve_fp32_nhwc_avg_generic_depthfirst
+struct sve_fp32_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = sve_fp32_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<float, float>;
   sve_fp32_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_fp32_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
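
The generic average kernel whose implementation follows accumulates every valid cell for each channel and then multiplies by 1 / window_cells (the rescale_value visible in the next file). As a plain-C++ reference of that contract — assuming the parameter meanings implied by the _impl signature above, with inptrs holding n_valid_cells row pointers of n_channels floats each — a minimal sketch:

// Scalar reference for the generic NHWC average-pool contract. Parameter
// meanings are assumptions read off the _impl signature, not library docs.
#include <cstdint>
#include <cstdio>
#include <vector>

static void avg_generic_reference(uint64_t window_cells, uint64_t n_valid_cells,
                                  uint64_t n_channels, const float *const *inptrs,
                                  float *outptr)
{
  // Mirrors rescale_value = 1.0f / window_cells in the vector kernel.
  const float rescale = 1.0f / static_cast<float>(window_cells);
  for (uint64_t c = 0; c < n_channels; c++)
  {
    float acc = 0.0f;
    for (uint64_t i = 0; i < n_valid_cells; i++)
    {
      acc += inptrs[i][c];
    }
    outptr[c] = acc * rescale;
  }
}

int main()
{
  // Toy example: a 3x3 window with only 4 valid (non-padding) cells, 2 channels.
  const std::vector<float> a{1.f, 2.f}, b{3.f, 4.f}, c{5.f, 6.f}, d{7.f, 8.f};
  const float *inptrs[] = {a.data(), b.data(), c.data(), d.data()};
  float out[2];
  avg_generic_reference(9, 4, 2, inptrs, out);
  std::printf("%f %f\n", out[0], out[1]);  // (1+3+5+7)/9 and (2+4+6+8)/9
  return 0;
}
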
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index 3e02570..bb60fe8 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 
@@ -41,35 +42,35 @@
   const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
 
   __asm__ __volatile__(
-    "ptrue p0.b\n"
-    "ld1rw { z8.s }, p0/Z, [%x[rescale_ptr]]\n"
     "mov x28, #0x0\n"
     "cntw x27\n"
     "cntw x26, ALL, MUL #2\n"
     "cntw x25, ALL, MUL #3\n"
+    "ptrue p0.b\n"
     "whilelt p3.s, x28, %x[n_channels]\n"
+    "ld1rw { z7.s }, p0/Z, [%x[rescale_ptr]]\n"
     "whilelt p2.s, x27, %x[n_channels]\n"
     "whilelt p1.s, x26, %x[n_channels]\n"
     "whilelt p0.s, x25, %x[n_channels]\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z7.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z6.b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z6.b, #0x0\n"
     "mov z5.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z4.b, #0x0\n"
+    "mov z3.b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
-    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
-    "ld1w { z30.s }, p2/Z, [x22, x27, LSL #2]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
     "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
     "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
     "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
@@ -82,47 +83,47 @@
     "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "fadd z23.s, z3.s, z2.s\n"
+    "fadd z23.s, z2.s, z1.s\n"
+    "fadd z19.s, z0.s, z31.s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd z19.s, z1.s, z0.s\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
-    "fadd z22.s, z31.s, z30.s\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "fadd z22.s, z30.s, z22.s\n"
     "fadd z18.s, z29.s, z28.s\n"
+    "subs x24, x24, #0x1\n"
+    "add x19, x19, #0x20\n"
     "fadd z21.s, z27.s, z21.s\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
     "fadd z17.s, z26.s, z17.s\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
     "fadd z20.s, z25.s, z20.s\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
     "fadd z16.s, z24.s, z16.s\n"
-    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
     "fadd z19.s, z23.s, z19.s\n"
-    "ld1w { z30.s }, p2/Z, [x22, x27, LSL #2]\n"
     "fadd z18.s, z22.s, z18.s\n"
-    "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
     "fadd z17.s, z21.s, z17.s\n"
-    "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
     "fadd z16.s, z20.s, z16.s\n"
+    "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "fadd z5.s, z5.s, z18.s\n"
     "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
-    "fadd z7.s, z7.s, z19.s\n"
     "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
-    "fadd z6.s, z6.s, z18.s\n"
+    "fadd z4.s, z4.s, z17.s\n"
+    "fadd z3.s, z3.s, z16.s\n"
     "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
-    "fadd z5.s, z5.s, z17.s\n"
     "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
-    "fadd z4.s, z4.s, z16.s\n"
     "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
     "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
     "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
     "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "fadd z23.s, z3.s, z2.s\n"
-    "fadd z19.s, z1.s, z0.s\n"
-    "fadd z22.s, z31.s, z30.s\n"
+    "fadd z23.s, z2.s, z1.s\n"
+    "fadd z19.s, z0.s, z31.s\n"
+    "fadd z22.s, z30.s, z22.s\n"
     "fadd z18.s, z29.s, z28.s\n"
     "fadd z21.s, z27.s, z21.s\n"
     "fadd z17.s, z26.s, z17.s\n"
@@ -132,96 +133,95 @@
     "fadd z18.s, z22.s, z18.s\n"
     "fadd z17.s, z21.s, z17.s\n"
     "fadd z16.s, z20.s, z16.s\n"
-    "fadd z7.s, z7.s, z19.s\n"
-    "fadd z6.s, z6.s, z18.s\n"
-    "fadd z5.s, z5.s, z17.s\n"
-    "fadd z4.s, z4.s, z16.s\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "fadd z5.s, z5.s, z18.s\n"
+    "fadd z4.s, z4.s, z17.s\n"
+    "fadd z3.s, z3.s, z16.s\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
     "subs x20, x20, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "fadd z7.s, z7.s, z3.s\n"
-    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
+    "fadd z6.s, z6.s, z2.s\n"
+    "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
     "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
-    "fadd z6.s, z6.s, z31.s\n"
+    "fadd z5.s, z5.s, z30.s\n"
+    "fadd z4.s, z4.s, z27.s\n"
     "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
-    "fadd z5.s, z5.s, z27.s\n"
-    "fadd z4.s, z4.s, z25.s\n"
+    "fadd z3.s, z3.s, z25.s\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "fmul z7.s, z7.s, z8.s\n"
-    "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
-    "fmul z6.s, z6.s, z8.s\n"
+    "fmul z6.s, z6.s, z7.s\n"
+    "fmul z5.s, z5.s, z7.s\n"
+    "st1w { z6.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "fmul z4.s, z4.s, z7.s\n"
+    "fmul z3.s, z3.s, z7.s\n"
+    "st1w { z5.s }, p2, [%x[outptr], x27, LSL #2]\n"
+    "st1w { z4.s }, p1, [%x[outptr], x26, LSL #2]\n"
     "incw x28, ALL, MUL #4\n"
-    "fmul z5.s, z5.s, z8.s\n"
-    "st1w { z6.s }, p2, [%x[outptr], x27, LSL #2]\n"
-    "fmul z4.s, z4.s, z8.s\n"
     "incw x27, ALL, MUL #4\n"
-    "st1w { z5.s }, p1, [%x[outptr], x26, LSL #2]\n"
-    "incw x26, ALL, MUL #4\n"
-    "st1w { z4.s }, p0, [%x[outptr], x25, LSL #2]\n"
+    "st1w { z3.s }, p0, [%x[outptr], x25, LSL #2]\n"
     "incw x25, ALL, MUL #4\n"
     "whilelt p0.s, x25, %x[n_channels]\n"
+    "incw x26, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
     "whilelt p3.s, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z7.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z6.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "fadd z23.s, z3.s, z2.s\n"
+    "fadd z23.s, z2.s, z1.s\n"
+    "fadd z19.s, z0.s, z31.s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "fadd z19.s, z1.s, z0.s\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "fadd z19.s, z23.s, z19.s\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "fadd z7.s, z7.s, z19.s\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "subs x24, x24, #0x1\n"
+    "fadd z6.s, z6.s, z19.s\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "fadd z23.s, z3.s, z2.s\n"
-    "fadd z19.s, z1.s, z0.s\n"
+    "fadd z23.s, z2.s, z1.s\n"
+    "fadd z19.s, z0.s, z31.s\n"
     "fadd z19.s, z23.s, z19.s\n"
-    "fadd z7.s, z7.s, z19.s\n"
+    "fadd z6.s, z6.s, z19.s\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
     "subs x20, x20, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "fadd z7.s, z7.s, z3.s\n"
+    "fadd z6.s, z6.s, z2.s\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "fmul z7.s, z7.s, z8.s\n"
-    "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "fmul z6.s, z6.s, z7.s\n"
+    "st1w { z6.s }, p3, [%x[outptr], x28, LSL #2]\n"
     "incw x28\n"
     "whilelt p3.s, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index b2c6912..b97e362 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst
+struct sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
+  using Parent = DepthfirstStrategy<float, float>;
 
-  typedef void (*kern_type)(unsigned int, const float *const *const, float *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
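
The 2x2/stride-1 kernel below produces a whole 2x2 output tile per pass over a 3x3 patch of input pointers, which is why the assembly juggles nine input and four output pointers at once. Setting aside the real _impl signature (its remaining parameters are not spelled out in this hunk), the per-channel tile arithmetic is just four overlapping 2x2 maxima; a tiny reference sketch:

// Per-channel reference for one 2x2-output tile of 2x2 max pooling, stride 1:
// the 3x3 input patch in[r][c] yields out[i][j]. Only the arithmetic is shown;
// pointer marshalling and padding handling are omitted.
#include <algorithm>
#include <cstdio>

static void max2x2_s1_tile(const float in[3][3], float out[2][2])
{
  for (int i = 0; i < 2; i++)
  {
    for (int j = 0; j < 2; j++)
    {
      out[i][j] = std::max(std::max(in[i][j], in[i][j + 1]),
                           std::max(in[i + 1][j], in[i + 1][j + 1]));
    }
  }
}

int main()
{
  const float in[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
  float out[2][2];
  max2x2_s1_tile(in, out);
  std::printf("%g %g / %g %g\n", out[0][0], out[0][1], out[1][0], out[1][1]);  // 5 6 / 8 9
  return 0;
}
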
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 786e477..122ee05 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -64,76 +64,76 @@
 
   __asm__ __volatile__(
     "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
-    "ptrue p2.b\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
     "mov x13, #0x0\n"
+    "whilelt p2.s, x13, x14\n"
     "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
-    "mov x12, #0x0\n"
-    "ldp x11, x10, [x20, #0x0]\n"
-    "whilelt p1.s, x13, x14\n"
+    "ldp x12, x11, [x20, #0x0]\n"
+    "ptrue p1.b\n"
+    "mov x10, #0x0\n"
     "ldp x9, x28, [x20, #0x10]\n"
     "ldp x27, x26, [x19, #0x0]\n"
     "ldp x25, x24, [x19, #0x10]\n"
     "ldp x23, x22, [x19, #0x20]\n"
     "ldp x21, x20, [x19, #0x30]\n"
     "ldr x19, [x19, #0x40]\n"
-    "ld1w { z31.s }, p1/Z, [x26, x13, LSL #2]\n"
-    "ld1w { z30.s }, p1/Z, [x23, x13, LSL #2]\n"
-    "ld1w { z29.s }, p1/Z, [x20, x13, LSL #2]\n"
-    "ld1w { z28.s }, p1/Z, [x24, x13, LSL #2]\n"
-    "ld1w { z27.s }, p1/Z, [x27, x13, LSL #2]\n"
-    "ld1w { z26.s }, p1/Z, [x22, x13, LSL #2]\n"
-    "ld1w { z25.s }, p1/Z, [x25, x13, LSL #2]\n"
-    "ld1w { z24.s }, p1/Z, [x21, x13, LSL #2]\n"
-    "ld1w { z23.s }, p1/Z, [x19, x13, LSL #2]\n"
+    "ld1w { z31.s }, p2/Z, [x26, x13, LSL #2]\n"
+    "ld1w { z30.s }, p2/Z, [x23, x13, LSL #2]\n"
+    "ld1w { z29.s }, p2/Z, [x20, x13, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x24, x13, LSL #2]\n"
+    "ld1w { z27.s }, p2/Z, [x27, x13, LSL #2]\n"
+    "ld1w { z26.s }, p2/Z, [x22, x13, LSL #2]\n"
+    "ld1w { z25.s }, p2/Z, [x25, x13, LSL #2]\n"
+    "ld1w { z24.s }, p2/Z, [x21, x13, LSL #2]\n"
+    "ld1w { z23.s }, p2/Z, [x19, x13, LSL #2]\n"
     "incw x13\n"
-    "whilelt p1.s, x13, x14\n"
+    "whilelt p2.s, x13, x14\n"
     "b.none 2f\n"
     "1:"  // Vector: Loop
-    "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z30.s\n"
-    "ld1w { z31.s }, p1/Z, [x26, x13, LSL #2]\n"
-    "whilelt p0.s, x12, x14\n"
-    "movprfx z21, z30\n fmax z21.s, p2/M, z21.s, z29.s\n"
-    "ld1w { z30.s }, p1/Z, [x23, x13, LSL #2]\n"
-    "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z27.s\n"
-    "ld1w { z29.s }, p1/Z, [x20, x13, LSL #2]\n"
-    "movprfx z17, z26\n fmax z17.s, p2/M, z17.s, z25.s\n"
-    "ld1w { z27.s }, p1/Z, [x27, x13, LSL #2]\n"
-    "movprfx z16, z24\n fmax z16.s, p2/M, z16.s, z28.s\n"
-    "ld1w { z28.s }, p1/Z, [x24, x13, LSL #2]\n"
-    "movprfx z20, z26\n fmax z20.s, p2/M, z20.s, z23.s\n"
-    "ld1w { z26.s }, p1/Z, [x22, x13, LSL #2]\n"
-    "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
-    "ld1w { z25.s }, p1/Z, [x25, x13, LSL #2]\n"
-    "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z17.s\n"
-    "ld1w { z24.s }, p1/Z, [x21, x13, LSL #2]\n"
-    "movprfx z17, z21\n fmax z17.s, p2/M, z17.s, z16.s\n"
-    "ld1w { z23.s }, p1/Z, [x19, x13, LSL #2]\n"
+    "movprfx z22, z31\n fmax z22.s, p1/M, z22.s, z30.s\n"
+    "movprfx z21, z30\n fmax z21.s, p1/M, z21.s, z29.s\n"
+    "ld1w { z31.s }, p2/Z, [x26, x13, LSL #2]\n"
+    "ld1w { z30.s }, p2/Z, [x23, x13, LSL #2]\n"
+    "movprfx z20, z28\n fmax z20.s, p1/M, z20.s, z27.s\n"
+    "movprfx z17, z26\n fmax z17.s, p1/M, z17.s, z25.s\n"
+    "ld1w { z29.s }, p2/Z, [x20, x13, LSL #2]\n"
+    "ld1w { z27.s }, p2/Z, [x27, x13, LSL #2]\n"
+    "movprfx z19, z24\n fmax z19.s, p1/M, z19.s, z28.s\n"
+    "movprfx z18, z26\n fmax z18.s, p1/M, z18.s, z23.s\n"
+    "ld1w { z28.s }, p2/Z, [x24, x13, LSL #2]\n"
+    "ld1w { z26.s }, p2/Z, [x22, x13, LSL #2]\n"
+    "ld1w { z25.s }, p2/Z, [x25, x13, LSL #2]\n"
+    "ld1w { z24.s }, p2/Z, [x21, x13, LSL #2]\n"
+    "whilelt p0.s, x10, x14\n"
+    "movprfx z16, z22\n fmax z16.s, p1/M, z16.s, z20.s\n"
+    "ld1w { z23.s }, p2/Z, [x19, x13, LSL #2]\n"
     "incw x13\n"
-    "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
-    "st1w { z19.s }, p0, [x11, x12, LSL #2]\n"
-    "whilelt p1.s, x13, x14\n"
-    "st1w { z18.s }, p0, [x10, x12, LSL #2]\n"
-    "st1w { z17.s }, p0, [x9, x12, LSL #2]\n"
-    "st1w { z16.s }, p0, [x28, x12, LSL #2]\n"
-    "incw x12\n"
+    "whilelt p2.s, x13, x14\n"
+    "st1w { z16.s }, p0, [x12, x10, LSL #2]\n"
+    "movprfx z16, z17\n fmax z16.s, p1/M, z16.s, z22.s\n"
+    "movprfx z17, z21\n fmax z17.s, p1/M, z17.s, z19.s\n"
+    "st1w { z16.s }, p0, [x11, x10, LSL #2]\n"
+    "movprfx z16, z21\n fmax z16.s, p1/M, z16.s, z18.s\n"
+    "st1w { z17.s }, p0, [x9, x10, LSL #2]\n"
+    "st1w { z16.s }, p0, [x28, x10, LSL #2]\n"
+    "incw x10\n"
     "b.any 1b\n"
     "2:"  // Vector: Tail
-    "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z30.s\n"
-    "whilelt p0.s, x12, x14\n"
-    "movprfx z21, z30\n fmax z21.s, p2/M, z21.s, z29.s\n"
-    "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z27.s\n"
-    "movprfx z17, z26\n fmax z17.s, p2/M, z17.s, z25.s\n"
-    "movprfx z16, z24\n fmax z16.s, p2/M, z16.s, z28.s\n"
-    "movprfx z20, z26\n fmax z20.s, p2/M, z20.s, z23.s\n"
-    "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
-    "st1w { z19.s }, p0, [x11, x12, LSL #2]\n"
-    "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z17.s\n"
-    "movprfx z17, z21\n fmax z17.s, p2/M, z17.s, z16.s\n"
-    "st1w { z18.s }, p0, [x10, x12, LSL #2]\n"
-    "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
-    "st1w { z17.s }, p0, [x9, x12, LSL #2]\n"
-    "st1w { z16.s }, p0, [x28, x12, LSL #2]\n"
+    "movprfx z22, z31\n fmax z22.s, p1/M, z22.s, z30.s\n"
+    "movprfx z21, z30\n fmax z21.s, p1/M, z21.s, z29.s\n"
+    "movprfx z20, z28\n fmax z20.s, p1/M, z20.s, z27.s\n"
+    "movprfx z17, z26\n fmax z17.s, p1/M, z17.s, z25.s\n"
+    "movprfx z19, z24\n fmax z19.s, p1/M, z19.s, z28.s\n"
+    "movprfx z18, z26\n fmax z18.s, p1/M, z18.s, z23.s\n"
+    "whilelt p0.s, x10, x14\n"
+    "movprfx z16, z22\n fmax z16.s, p1/M, z16.s, z20.s\n"
+    "st1w { z16.s }, p0, [x12, x10, LSL #2]\n"
+    "movprfx z16, z17\n fmax z16.s, p1/M, z16.s, z22.s\n"
+    "movprfx z17, z21\n fmax z17.s, p1/M, z17.s, z19.s\n"
+    "st1w { z16.s }, p0, [x11, x10, LSL #2]\n"
+    "movprfx z16, z21\n fmax z16.s, p1/M, z16.s, z18.s\n"
+    "st1w { z17.s }, p0, [x9, x10, LSL #2]\n"
+    "st1w { z16.s }, p0, [x28, x10, LSL #2]\n"
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp
index 5f65b7f..5f65350 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void sve_fp32_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
 
-struct sve_fp32_nhwc_max_generic_depthfirst
+struct sve_fp32_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<float, float>
 {
-  typedef float operand_type;
-  typedef float return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const float *const *const inptrs, float *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = sve_fp32_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<float, float>;
   sve_fp32_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_fp32_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
index a2f4398..fefddae 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 
@@ -39,181 +40,180 @@
 )
 {
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x28, #0x0\n"
     "cntw x27\n"
     "cntw x26, ALL, MUL #2\n"
     "cntw x25, ALL, MUL #3\n"
-    "whilelt p3.s, x28, %x[n_channels]\n"
-    "whilelt p2.s, x27, %x[n_channels]\n"
-    "whilelt p1.s, x26, %x[n_channels]\n"
-    "whilelt p0.s, x25, %x[n_channels]\n"
+    "whilelt p4.s, x28, %x[n_channels]\n"
+    "whilelt p3.s, x27, %x[n_channels]\n"
+    "whilelt p2.s, x26, %x[n_channels]\n"
+    "whilelt p1.s, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.s, #0xff800000\n"
     "mov z7.s, #0xff800000\n"
     "mov x19, %x[inptrs]\n"
     "mov z6.s, #0xff800000\n"
-    "lsr x24, %x[n_valid_cells], #0x2\n"
     "mov z5.s, #0xff800000\n"
-    "mov z4.s, #0xff800000\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
-    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
-    "ld1w { z30.s }, p2/Z, [x22, x27, LSL #2]\n"
-    "ld1w { z22.s }, p2/Z, [x21, x27, LSL #2]\n"
-    "ld1w { z29.s }, p2/Z, [x20, x27, LSL #2]\n"
-    "ld1w { z28.s }, p1/Z, [x23, x26, LSL #2]\n"
-    "ld1w { z27.s }, p1/Z, [x22, x26, LSL #2]\n"
-    "ld1w { z21.s }, p1/Z, [x21, x26, LSL #2]\n"
-    "ld1w { z26.s }, p1/Z, [x20, x26, LSL #2]\n"
-    "ld1w { z16.s }, p0/Z, [x23, x25, LSL #2]\n"
-    "ld1w { z25.s }, p0/Z, [x22, x25, LSL #2]\n"
-    "ld1w { z20.s }, p0/Z, [x21, x25, LSL #2]\n"
-    "ld1w { z24.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z0.s }, p3/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x22, x27, LSL #2]\n"
+    "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z30.s }, p3/Z, [x20, x27, LSL #2]\n"
+    "ld1w { z29.s }, p2/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x22, x26, LSL #2]\n"
+    "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z27.s }, p2/Z, [x20, x26, LSL #2]\n"
+    "ld1w { z26.s }, p1/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"
+    "movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
+    "movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z0\n fmax z18.s, p0/M, z18.s, z31.s\n"
+    "fmax z22.s, p0/M, z22.s, z30.s\n"
+    "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "movprfx z17, z29\n fmax z17.s, p0/M, z17.s, z28.s\n"
+    "fmax z21.s, p0/M, z21.s, z27.s\n"
+    "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
+    "movprfx z16, z26\n fmax z16.s, p0/M, z16.s, z25.s\n"
+    "fmax z20.s, p0/M, z20.s, z24.s\n"
+    "ld1w { z0.s }, p3/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z31.s }, p3/Z, [x22, x27, LSL #2]\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "fmax z18.s, p0/M, z18.s, z22.s\n"
+    "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+    "ld1w { z30.s }, p3/Z, [x20, x27, LSL #2]\n"
+    "fmax z17.s, p0/M, z17.s, z21.s\n"
+    "fmax z16.s, p0/M, z16.s, z20.s\n"
+    "ld1w { z29.s }, p2/Z, [x23, x26, LSL #2]\n"
+    "ld1w { z28.s }, p2/Z, [x22, x26, LSL #2]\n"
+    "subs x24, x24, #0x1\n"
+    "fmax z8.s, p0/M, z8.s, z19.s\n"
+    "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+    "ld1w { z27.s }, p2/Z, [x20, x26, LSL #2]\n"
+    "fmax z7.s, p0/M, z7.s, z18.s\n"
+    "fmax z6.s, p0/M, z6.s, z17.s\n"
+    "ld1w { z26.s }, p1/Z, [x23, x25, LSL #2]\n"
+    "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+    "fmax z5.s, p0/M, z5.s, z16.s\n"
     "add x19, x19, #0x20\n"
-    "movprfx z18, z31\n fmax z18.s, p4/M, z18.s, z30.s\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "fmax z22.s, p4/M, z22.s, z29.s\n"
-    "movprfx z17, z28\n fmax z17.s, p4/M, z17.s, z27.s\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "fmax z21.s, p4/M, z21.s, z26.s\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "fmax z16.s, p4/M, z16.s, z25.s\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
-    "fmax z20.s, p4/M, z20.s, z24.s\n"
-    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
-    "fmax z19.s, p4/M, z19.s, z23.s\n"
-    "ld1w { z30.s }, p2/Z, [x22, x27, LSL #2]\n"
-    "fmax z18.s, p4/M, z18.s, z22.s\n"
-    "ld1w { z22.s }, p2/Z, [x21, x27, LSL #2]\n"
-    "fmax z17.s, p4/M, z17.s, z21.s\n"
-    "ld1w { z29.s }, p2/Z, [x20, x27, LSL #2]\n"
-    "fmax z16.s, p4/M, z16.s, z20.s\n"
-    "ld1w { z28.s }, p1/Z, [x23, x26, LSL #2]\n"
-    "fmax z7.s, p4/M, z7.s, z19.s\n"
-    "ld1w { z27.s }, p1/Z, [x22, x26, LSL #2]\n"
-    "fmax z6.s, p4/M, z6.s, z18.s\n"
-    "ld1w { z21.s }, p1/Z, [x21, x26, LSL #2]\n"
-    "fmax z5.s, p4/M, z5.s, z17.s\n"
-    "ld1w { z26.s }, p1/Z, [x20, x26, LSL #2]\n"
-    "fmax z4.s, p4/M, z4.s, z16.s\n"
-    "ld1w { z16.s }, p0/Z, [x23, x25, LSL #2]\n"
-    "ld1w { z25.s }, p0/Z, [x22, x25, LSL #2]\n"
-    "ld1w { z20.s }, p0/Z, [x21, x25, LSL #2]\n"
-    "ld1w { z24.s }, p0/Z, [x20, x25, LSL #2]\n"
+    "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+    "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"
-    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
-    "movprfx z18, z31\n fmax z18.s, p4/M, z18.s, z30.s\n"
-    "fmax z22.s, p4/M, z22.s, z29.s\n"
-    "movprfx z17, z28\n fmax z17.s, p4/M, z17.s, z27.s\n"
-    "fmax z21.s, p4/M, z21.s, z26.s\n"
-    "fmax z16.s, p4/M, z16.s, z25.s\n"
-    "fmax z20.s, p4/M, z20.s, z24.s\n"
-    "fmax z19.s, p4/M, z19.s, z23.s\n"
-    "fmax z18.s, p4/M, z18.s, z22.s\n"
-    "fmax z17.s, p4/M, z17.s, z21.s\n"
-    "fmax z16.s, p4/M, z16.s, z20.s\n"
-    "fmax z7.s, p4/M, z7.s, z19.s\n"
-    "fmax z6.s, p4/M, z6.s, z18.s\n"
-    "fmax z5.s, p4/M, z5.s, z17.s\n"
-    "fmax z4.s, p4/M, z4.s, z16.s\n"
+    "movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
+    "movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
+    "movprfx z18, z0\n fmax z18.s, p0/M, z18.s, z31.s\n"
+    "fmax z22.s, p0/M, z22.s, z30.s\n"
+    "movprfx z17, z29\n fmax z17.s, p0/M, z17.s, z28.s\n"
+    "fmax z21.s, p0/M, z21.s, z27.s\n"
+    "movprfx z16, z26\n fmax z16.s, p0/M, z16.s, z25.s\n"
+    "fmax z20.s, p0/M, z20.s, z24.s\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "fmax z18.s, p0/M, z18.s, z22.s\n"
+    "fmax z17.s, p0/M, z17.s, z21.s\n"
+    "fmax z16.s, p0/M, z16.s, z20.s\n"
+    "fmax z8.s, p0/M, z8.s, z19.s\n"
+    "fmax z7.s, p0/M, z7.s, z18.s\n"
+    "fmax z6.s, p0/M, z6.s, z17.s\n"
+    "fmax z5.s, p0/M, z5.s, z16.s\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
     "subs x20, x20, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "fmax z7.s, p4/M, z7.s, z3.s\n"
-    "ld1w { z31.s }, p2/Z, [x23, x27, LSL #2]\n"
-    "ld1w { z28.s }, p1/Z, [x23, x26, LSL #2]\n"
-    "fmax z6.s, p4/M, z6.s, z31.s\n"
-    "ld1w { z16.s }, p0/Z, [x23, x25, LSL #2]\n"
-    "fmax z5.s, p4/M, z5.s, z28.s\n"
-    "fmax z4.s, p4/M, z4.s, z16.s\n"
+    "fmax z8.s, p0/M, z8.s, z4.s\n"
+    "ld1w { z0.s }, p3/Z, [x23, x27, LSL #2]\n"
+    "ld1w { z29.s }, p2/Z, [x23, x26, LSL #2]\n"
+    "fmax z7.s, p0/M, z7.s, z0.s\n"
+    "fmax z6.s, p0/M, z6.s, z29.s\n"
+    "ld1w { z26.s }, p1/Z, [x23, x25, LSL #2]\n"
+    "fmax z5.s, p0/M, z5.s, z26.s\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "st1w { z8.s }, p4, [%x[outptr], x28, LSL #2]\n"
     "incw x28, ALL, MUL #4\n"
-    "st1w { z6.s }, p2, [%x[outptr], x27, LSL #2]\n"
+    "st1w { z7.s }, p3, [%x[outptr], x27, LSL #2]\n"
     "incw x27, ALL, MUL #4\n"
-    "st1w { z5.s }, p1, [%x[outptr], x26, LSL #2]\n"
+    "st1w { z6.s }, p2, [%x[outptr], x26, LSL #2]\n"
     "incw x26, ALL, MUL #4\n"
-    "st1w { z4.s }, p0, [%x[outptr], x25, LSL #2]\n"
+    "st1w { z5.s }, p1, [%x[outptr], x25, LSL #2]\n"
     "incw x25, ALL, MUL #4\n"
-    "whilelt p0.s, x25, %x[n_channels]\n"
+    "whilelt p1.s, x25, %x[n_channels]\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.s, x28, %x[n_channels]\n"
+    "whilelt p4.s, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z7.s, #0xff800000\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.s, #0xff800000\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "add x19, x19, #0x20\n"
+    "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"
+    "movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
+    "movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "subs x24, x24, #0x1\n"
+    "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+    "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
+    "fmax z8.s, p0/M, z8.s, z19.s\n"
     "add x19, x19, #0x20\n"
-    "fmax z19.s, p4/M, z19.s, z23.s\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "ld1w { z2.s }, p3/Z, [x22, x28, LSL #2]\n"
-    "fmax z7.s, p4/M, z7.s, z19.s\n"
-    "ld1w { z1.s }, p3/Z, [x21, x28, LSL #2]\n"
-    "ld1w { z0.s }, p3/Z, [x20, x28, LSL #2]\n"
+    "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+    "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "movprfx z19, z3\n fmax z19.s, p4/M, z19.s, z2.s\n"
-    "movprfx z23, z1\n fmax z23.s, p4/M, z23.s, z0.s\n"
-    "fmax z19.s, p4/M, z19.s, z23.s\n"
-    "fmax z7.s, p4/M, z7.s, z19.s\n"
+    "movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
+    "movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
+    "fmax z19.s, p0/M, z19.s, z23.s\n"
+    "fmax z8.s, p0/M, z8.s, z19.s\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
     "subs x20, x20, #0x1\n"
-    "ld1w { z3.s }, p3/Z, [x23, x28, LSL #2]\n"
-    "fmax z7.s, p4/M, z7.s, z3.s\n"
+    "fmax z8.s, p0/M, z8.s, z4.s\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
+    "st1w { z8.s }, p4, [%x[outptr], x28, LSL #2]\n"
     "incw x28\n"
-    "whilelt p3.s, x28, %x[n_channels]\n"
+    "whilelt p4.s, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp
index 06582fe..dd2ff4f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,29 +26,21 @@
 
 #pragma once
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
 
 void sve_s8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
 
-struct sve_s8_nhwc_avg_generic_depthfirst
+struct sve_s8_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = sve_s8_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t>;
   sve_s8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_s8_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
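
Beyond relaxing the guard from SVE2 to SVE, the functional change in the next file keeps the rounded Q31 rescale in 64 bits so that a result of exactly 1 << 31 can be detected and halved before narrowing to int32_t. A standalone sketch of just that guard (the helper name to_q31 and its driver are this sketch's own; the surrounding shift/normalisation bookkeeping is not reproduced):

// Minimal sketch of the Q31 conversion guarded in the hunk below: keeping the
// rounded value in 64 bits lets the case that rounds to exactly 1 << 31 be
// folded into the shift before narrowing, instead of overflowing int32_t.
#include <cmath>
#include <cstdint>
#include <cstdio>

static void to_q31(float f, int32_t &multiplier, int &shift)
{
  int64_t long_value = static_cast<int64_t>(std::round(f * static_cast<float>(1ll << 31)));
  if (long_value == (1ll << 31))
  {
    shift++;            // bump the shift and halve the value,
    long_value >>= 1;   // exactly as the patched code does
  }
  multiplier = static_cast<int32_t>(long_value);
}

int main()
{
  int32_t m = 0;
  int shift = 0;
  to_q31(1.0f, m, shift);  // the case that previously overflowed the int32_t
  std::printf("multiplier=%d shift=%d\n", m, shift);  // 1073741824, 1
  return 0;
}
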
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
index 3581095..dab142f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,11 +23,12 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
@@ -84,30 +85,31 @@
       f_rescale_value *= 2.0f;
     }
 
-    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
-    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x26, #0x0\n"
     "cntb x25\n"
     "cntb x24, ALL, MUL #2\n"
     "cntb x23, ALL, MUL #3\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
-    "whilelt p2.b, x25, %x[n_channels]\n"
-    "whilelt p1.b, x24, %x[n_channels]\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z15.s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z14.s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z13.s, #0x0\n"
     "mov z12.s, #0x0\n"
     "mov z11.s, #0x0\n"
@@ -124,41 +126,41 @@
     "mov z0.s, #0x0\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
     ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
     ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
     ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
     ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
     ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
     ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
     ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
     ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
     ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
     ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
@@ -202,156 +204,156 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508a3b5  // sshllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508a7b4  // sshllt z20.h, z29.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508a373  // sshllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508a772  // sshllt z18.h, z27.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
-    ".inst 0x4508a3b0  // sshllb z16.h, z29.b, #0x0\n"
-    ".inst 0x4590416b  // saddwb z11.s, z11.s, z16.h\n"
-    ".inst 0x4590454a  // saddwt z10.s, z10.s, z16.h\n"
-    ".inst 0x4508a7b0  // sshllt z16.h, z29.b, #0x0\n"
-    ".inst 0x45904129  // saddwb z9.s, z9.s, z16.h\n"
-    ".inst 0x45904508  // saddwt z8.s, z8.s, z16.h\n"
-    ".inst 0x4508a370  // sshllb z16.h, z27.b, #0x0\n"
-    ".inst 0x459040e7  // saddwb z7.s, z7.s, z16.h\n"
-    ".inst 0x459044c6  // saddwt z6.s, z6.s, z16.h\n"
-    ".inst 0x4508a770  // sshllt z16.h, z27.b, #0x0\n"
-    ".inst 0x459040a5  // saddwb z5.s, z5.s, z16.h\n"
-    ".inst 0x45904484  // saddwt z4.s, z4.s, z16.h\n"
-    ".inst 0x4508a330  // sshllb z16.h, z25.b, #0x0\n"
-    ".inst 0x45904063  // saddwb z3.s, z3.s, z16.h\n"
-    ".inst 0x45904442  // saddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508a331  // sshllb z17.h, z25.b, #0x0\n"
     ".inst 0x4508a730  // sshllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
     ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
     ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "mov z20.s, #0x7f\n"
-    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
-    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
-    "not z19.s, p4/M, z20.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
     ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
     ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
     ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
     ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
     ".inst 0x04b1756b  // sqdmulh z11.s, z11.s, z17.s\n"
     ".inst 0x04b1754a  // sqdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
     ".inst 0x04b17529  // sqdmulh z9.s, z9.s, z17.s\n"
     ".inst 0x04b17508  // sqdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x4482820b  // srshl z11.s, p0/M, z11.s, z16.s\n"
+    ".inst 0x4482820a  // srshl z10.s, p0/M, z10.s, z16.s\n"
     ".inst 0x04b174e7  // sqdmulh z7.s, z7.s, z17.s\n"
     ".inst 0x04b174c6  // sqdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x44828209  // srshl z9.s, p0/M, z9.s, z16.s\n"
+    ".inst 0x44828208  // srshl z8.s, p0/M, z8.s, z16.s\n"
     ".inst 0x04b174a5  // sqdmulh z5.s, z5.s, z17.s\n"
     ".inst 0x04b17484  // sqdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x44828207  // srshl z7.s, p0/M, z7.s, z16.s\n"
+    ".inst 0x44828206  // srshl z6.s, p0/M, z6.s, z16.s\n"
     ".inst 0x04b17463  // sqdmulh z3.s, z3.s, z17.s\n"
     ".inst 0x04b17442  // sqdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x44828205  // srshl z5.s, p0/M, z5.s, z16.s\n"
+    ".inst 0x44828204  // srshl z4.s, p0/M, z4.s, z16.s\n"
     ".inst 0x04b17421  // sqdmulh z1.s, z1.s, z17.s\n"
     ".inst 0x04b17400  // sqdmulh z0.s, z0.s, z17.s\n"
-    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
-    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
-    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
-    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
-    ".inst 0x4482920b  // srshl z11.s, p4/M, z11.s, z16.s\n"
-    ".inst 0x4482920a  // srshl z10.s, p4/M, z10.s, z16.s\n"
-    ".inst 0x44829209  // srshl z9.s, p4/M, z9.s, z16.s\n"
-    ".inst 0x44829208  // srshl z8.s, p4/M, z8.s, z16.s\n"
-    ".inst 0x44829207  // srshl z7.s, p4/M, z7.s, z16.s\n"
-    ".inst 0x44829206  // srshl z6.s, p4/M, z6.s, z16.s\n"
-    ".inst 0x44829205  // srshl z5.s, p4/M, z5.s, z16.s\n"
-    ".inst 0x44829204  // srshl z4.s, p4/M, z4.s, z16.s\n"
-    ".inst 0x44829203  // srshl z3.s, p4/M, z3.s, z16.s\n"
-    ".inst 0x44829202  // srshl z2.s, p4/M, z2.s, z16.s\n"
-    ".inst 0x44829201  // srshl z1.s, p4/M, z1.s, z16.s\n"
-    ".inst 0x44829200  // srshl z0.s, p4/M, z0.s, z16.s\n"
-    "smax z15.s, p4/M, z15.s, z19.s\n"
-    "smax z14.s, p4/M, z14.s, z19.s\n"
-    "smax z13.s, p4/M, z13.s, z19.s\n"
-    "smax z12.s, p4/M, z12.s, z19.s\n"
-    "smin z15.s, p4/M, z15.s, z20.s\n"
-    "smin z14.s, p4/M, z14.s, z20.s\n"
-    "smin z13.s, p4/M, z13.s, z20.s\n"
-    "smin z12.s, p4/M, z12.s, z20.s\n"
-    "smax z11.s, p4/M, z11.s, z19.s\n"
+    ".inst 0x44828203  // srshl z3.s, p0/M, z3.s, z16.s\n"
+    ".inst 0x44828202  // srshl z2.s, p0/M, z2.s, z16.s\n"
+    "mov z18.s, #0x7f\n"
+    ".inst 0x44828201  // srshl z1.s, p0/M, z1.s, z16.s\n"
+    ".inst 0x44828200  // srshl z0.s, p0/M, z0.s, z16.s\n"
+    "not z16.s, p0/M, z18.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
-    "smax z10.s, p4/M, z10.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
+    "smin z11.s, p0/M, z11.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
-    "smin z11.s, p4/M, z11.s, z20.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
-    "smin z10.s, p4/M, z10.s, z20.s\n"
-    "incb x26, ALL, MUL #4\n"
-    "smax z9.s, p4/M, z9.s, z19.s\n"
-    "smax z8.s, p4/M, z8.s, z19.s\n"
-    "smax z7.s, p4/M, z7.s, z19.s\n"
-    "smax z6.s, p4/M, z6.s, z19.s\n"
-    "trn1 z18.h, z11.h, z10.h\n"
-    "smin z9.s, p4/M, z9.s, z20.s\n"
-    "smin z8.s, p4/M, z8.s, z20.s\n"
-    "smin z7.s, p4/M, z7.s, z20.s\n"
-    "smin z6.s, p4/M, z6.s, z20.s\n"
-    "smax z5.s, p4/M, z5.s, z19.s\n"
+    "smin z10.s, p0/M, z10.s, z18.s\n"
+    "smin z9.s, p0/M, z9.s, z18.s\n"
+    "trn1 z17.h, z11.h, z10.h\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "smin z8.s, p0/M, z8.s, z18.s\n"
+    "smin z7.s, p0/M, z7.s, z18.s\n"
     "trn1 z16.h, z9.h, z8.h\n"
-    "smax z4.s, p4/M, z4.s, z19.s\n"
-    "trn1 z17.h, z7.h, z6.h\n"
-    "trn1 z16.b, z18.b, z16.b\n"
-    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
-    "smin z5.s, p4/M, z5.s, z20.s\n"
-    "incb x25, ALL, MUL #4\n"
-    "smin z4.s, p4/M, z4.s, z20.s\n"
-    "smax z3.s, p4/M, z3.s, z19.s\n"
-    "smax z2.s, p4/M, z2.s, z19.s\n"
-    "smax z1.s, p4/M, z1.s, z19.s\n"
-    "smax z0.s, p4/M, z0.s, z19.s\n"
-    "trn1 z16.h, z5.h, z4.h\n"
-    "smin z3.s, p4/M, z3.s, z20.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
-    "smin z2.s, p4/M, z2.s, z20.s\n"
-    "incb x24, ALL, MUL #4\n"
-    "smin z1.s, p4/M, z1.s, z20.s\n"
-    "smin z0.s, p4/M, z0.s, z20.s\n"
+    "smin z6.s, p0/M, z6.s, z18.s\n"
+    "smin z5.s, p0/M, z5.s, z18.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "st1b { z16.b }, p3, [%x[outptr], x25]\n"
+    "smin z4.s, p0/M, z4.s, z18.s\n"
+    "smin z3.s, p0/M, z3.s, z18.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "smin z2.s, p0/M, z2.s, z18.s\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
     "trn1 z17.h, z3.h, z2.h\n"
+    "st1b { z16.b }, p2, [%x[outptr], x24]\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
     "trn1 z16.h, z1.h, z0.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
     "incb x23, ALL, MUL #4\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "incb x26, ALL, MUL #4\n"
+    "incb x25, ALL, MUL #4\n"
+    "incb x24, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z15.s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z14.s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z13.s, #0x0\n"
     "mov z12.s, #0x0\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
     ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
     ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
@@ -365,45 +367,44 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
-    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
-    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
-    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "mov z20.s, #0x7f\n"
-    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
-    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
-    "not z19.s, p4/M, z20.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
     ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
     ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
     ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
     ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
-    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
-    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
-    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
-    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
-    "smax z15.s, p4/M, z15.s, z19.s\n"
-    "smax z14.s, p4/M, z14.s, z19.s\n"
-    "smax z13.s, p4/M, z13.s, z19.s\n"
-    "smax z12.s, p4/M, z12.s, z19.s\n"
-    "smin z15.s, p4/M, z15.s, z20.s\n"
-    "smin z14.s, p4/M, z14.s, z20.s\n"
-    "smin z13.s, p4/M, z13.s, z20.s\n"
-    "smin z12.s, p4/M, z12.s, z20.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    "mov z18.s, #0x7f\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    "not z16.s, p0/M, z18.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
     "incb x26\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
     : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -413,4 +414,4 @@
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
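
The C++ change in this file (mirrored in the s8q variant below) is a narrowing fix: round(f_rescale_value * 2^31) can land exactly on 2^31, which is not representable in int32_t, so the comparison and the compensating halving now happen on a 64-bit intermediate before the cast. A stand-alone sketch of that conversion, with an illustrative helper name (the kernel keeps this inline):

#include <cmath>
#include <cstdint>
#include <utility>

// Illustrative helper, not part of the patch: narrow a rounded Q0.31
// multiplier safely, exactly as the hunk above now does inline.
static std::pair<int32_t, int32_t> narrow_rescale(float f_rescale_value, int32_t shift_value)
{
  // Keep the rounded value in 64 bits: it may be exactly 1 << 31.
  int64_t long_rescale_value = static_cast<int64_t>(
    std::round(f_rescale_value * static_cast<float>(1ll << 31)));
  if (long_rescale_value == (1ll << 31))
  {
    // Halve the multiplier and fold the factor of two into the shift,
    // as the kernel does, so the result fits in int32_t.
    shift_value++;
    long_rescale_value >>= 1;
  }
  return std::make_pair(static_cast<int32_t>(long_rescale_value), shift_value);
}
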
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 46132f2..ac842ac 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst
+struct sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<int8_t, int8_t>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
+  using Parent = DepthfirstStrategy<int8_t, int8_t>;
 
-  typedef void (*kern_type)(unsigned int, const int8_t *const *const, int8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index beabe7b..0cf3774 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -64,76 +64,76 @@
 
   __asm__ __volatile__(
     "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
-    "ptrue p2.b\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
     "mov x13, #0x0\n"
+    "whilelt p2.b, x13, x14\n"
     "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
-    "mov x12, #0x0\n"
-    "ldp x11, x10, [x20, #0x0]\n"
-    "whilelt p1.b, x13, x14\n"
+    "ldp x12, x11, [x20, #0x0]\n"
+    "ptrue p1.b\n"
+    "mov x10, #0x0\n"
     "ldp x9, x28, [x20, #0x10]\n"
     "ldp x27, x26, [x19, #0x0]\n"
     "ldp x25, x24, [x19, #0x10]\n"
     "ldp x23, x22, [x19, #0x20]\n"
     "ldp x21, x20, [x19, #0x30]\n"
     "ldr x19, [x19, #0x40]\n"
-    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
-    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
-    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
-    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
-    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
-    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
-    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
-    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
-    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+    "ld1b { z30.b }, p2/Z, [x23, x13]\n"
+    "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+    "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+    "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+    "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+    "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+    "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+    "ld1b { z23.b }, p2/Z, [x19, x13]\n"
     "incw x13\n"
-    "whilelt p1.b, x13, x14\n"
+    "whilelt p2.b, x13, x14\n"
     "b.none 2f\n"
     "1:"  // Vector: Loop
-    "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z30.b\n"
-    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
-    "whilelt p0.b, x12, x14\n"
-    "movprfx z21, z30\n smax z21.b, p2/M, z21.b, z29.b\n"
-    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
-    "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z27.b\n"
-    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
-    "movprfx z17, z26\n smax z17.b, p2/M, z17.b, z25.b\n"
-    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
-    "movprfx z16, z24\n smax z16.b, p2/M, z16.b, z28.b\n"
-    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
-    "movprfx z20, z26\n smax z20.b, p2/M, z20.b, z23.b\n"
-    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
-    "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
-    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
-    "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z17.b\n"
-    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
-    "movprfx z17, z21\n smax z17.b, p2/M, z17.b, z16.b\n"
-    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "movprfx z22, z31\n smax z22.b, p1/M, z22.b, z30.b\n"
+    "movprfx z21, z30\n smax z21.b, p1/M, z21.b, z29.b\n"
+    "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+    "ld1b { z30.b }, p2/Z, [x23, x13]\n"
+    "movprfx z20, z28\n smax z20.b, p1/M, z20.b, z27.b\n"
+    "movprfx z17, z26\n smax z17.b, p1/M, z17.b, z25.b\n"
+    "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+    "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+    "movprfx z19, z24\n smax z19.b, p1/M, z19.b, z28.b\n"
+    "movprfx z18, z26\n smax z18.b, p1/M, z18.b, z23.b\n"
+    "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+    "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+    "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+    "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+    "whilelt p0.b, x10, x14\n"
+    "movprfx z16, z22\n smax z16.b, p1/M, z16.b, z20.b\n"
+    "ld1b { z23.b }, p2/Z, [x19, x13]\n"
     "incw x13\n"
-    "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z20.b\n"
-    "st1b { z19.b }, p0, [x11, x12]\n"
-    "whilelt p1.b, x13, x14\n"
-    "st1b { z18.b }, p0, [x10, x12]\n"
-    "st1b { z17.b }, p0, [x9, x12]\n"
-    "st1b { z16.b }, p0, [x28, x12]\n"
-    "incw x12\n"
+    "whilelt p2.b, x13, x14\n"
+    "st1b { z16.b }, p0, [x12, x10]\n"
+    "movprfx z16, z17\n smax z16.b, p1/M, z16.b, z22.b\n"
+    "movprfx z17, z21\n smax z17.b, p1/M, z17.b, z19.b\n"
+    "st1b { z16.b }, p0, [x11, x10]\n"
+    "movprfx z16, z21\n smax z16.b, p1/M, z16.b, z18.b\n"
+    "st1b { z17.b }, p0, [x9, x10]\n"
+    "st1b { z16.b }, p0, [x28, x10]\n"
+    "incw x10\n"
     "b.any 1b\n"
     "2:"  // Vector: Tail
-    "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z30.b\n"
-    "whilelt p0.b, x12, x14\n"
-    "movprfx z21, z30\n smax z21.b, p2/M, z21.b, z29.b\n"
-    "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z27.b\n"
-    "movprfx z17, z26\n smax z17.b, p2/M, z17.b, z25.b\n"
-    "movprfx z16, z24\n smax z16.b, p2/M, z16.b, z28.b\n"
-    "movprfx z20, z26\n smax z20.b, p2/M, z20.b, z23.b\n"
-    "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
-    "st1b { z19.b }, p0, [x11, x12]\n"
-    "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z17.b\n"
-    "movprfx z17, z21\n smax z17.b, p2/M, z17.b, z16.b\n"
-    "st1b { z18.b }, p0, [x10, x12]\n"
-    "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z20.b\n"
-    "st1b { z17.b }, p0, [x9, x12]\n"
-    "st1b { z16.b }, p0, [x28, x12]\n"
+    "movprfx z22, z31\n smax z22.b, p1/M, z22.b, z30.b\n"
+    "movprfx z21, z30\n smax z21.b, p1/M, z21.b, z29.b\n"
+    "movprfx z20, z28\n smax z20.b, p1/M, z20.b, z27.b\n"
+    "movprfx z17, z26\n smax z17.b, p1/M, z17.b, z25.b\n"
+    "movprfx z19, z24\n smax z19.b, p1/M, z19.b, z28.b\n"
+    "movprfx z18, z26\n smax z18.b, p1/M, z18.b, z23.b\n"
+    "whilelt p0.b, x10, x14\n"
+    "movprfx z16, z22\n smax z16.b, p1/M, z16.b, z20.b\n"
+    "st1b { z16.b }, p0, [x12, x10]\n"
+    "movprfx z16, z17\n smax z16.b, p1/M, z16.b, z22.b\n"
+    "movprfx z17, z21\n smax z17.b, p1/M, z17.b, z19.b\n"
+    "st1b { z16.b }, p0, [x11, x10]\n"
+    "movprfx z16, z21\n smax z16.b, p1/M, z16.b, z18.b\n"
+    "st1b { z17.b }, p0, [x9, x10]\n"
+    "st1b { z16.b }, p0, [x28, x10]\n"
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp
index 168cbf5..2ee5bc0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void sve_s8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
 
-struct sve_s8_nhwc_max_generic_depthfirst
+struct sve_s8_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = sve_s8_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t>;
   sve_s8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_s8_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
index 11195f5..3fd4828 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 
@@ -39,181 +40,180 @@
 )
 {
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x28, #0x0\n"
     "cntb x27\n"
     "cntb x26, ALL, MUL #2\n"
     "cntb x25, ALL, MUL #3\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
-    "whilelt p2.b, x27, %x[n_channels]\n"
-    "whilelt p1.b, x26, %x[n_channels]\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.b, #0x80\n"
     "mov z7.b, #0x80\n"
     "mov x19, %x[inptrs]\n"
     "mov z6.b, #0x80\n"
-    "lsr x24, %x[n_valid_cells], #0x2\n"
     "mov z5.b, #0x80\n"
-    "mov z4.b, #0x80\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
+    "smax z22.b, p0/M, z22.b, z30.b\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
+    "smax z21.b, p0/M, z21.b, z27.b\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "subs x24, x24, #0x1\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "smax z7.b, p0/M, z7.b, z18.b\n"
+    "smax z6.b, p0/M, z6.b, z17.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "smax z5.b, p0/M, z5.b, z16.b\n"
     "add x19, x19, #0x20\n"
-    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "smax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "smax z21.b, p4/M, z21.b, z26.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "smax z16.b, p4/M, z16.b, z25.b\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "smax z20.b, p4/M, z20.b, z24.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "smax z18.b, p4/M, z18.b, z22.b\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "smax z17.b, p4/M, z17.b, z21.b\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "smax z16.b, p4/M, z16.b, z20.b\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "smax z7.b, p4/M, z7.b, z19.b\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "smax z6.b, p4/M, z6.b, z18.b\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "smax z5.b, p4/M, z5.b, z17.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "smax z4.b, p4/M, z4.b, z16.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
-    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
-    "smax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
-    "smax z21.b, p4/M, z21.b, z26.b\n"
-    "smax z16.b, p4/M, z16.b, z25.b\n"
-    "smax z20.b, p4/M, z20.b, z24.b\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "smax z18.b, p4/M, z18.b, z22.b\n"
-    "smax z17.b, p4/M, z17.b, z21.b\n"
-    "smax z16.b, p4/M, z16.b, z20.b\n"
-    "smax z7.b, p4/M, z7.b, z19.b\n"
-    "smax z6.b, p4/M, z6.b, z18.b\n"
-    "smax z5.b, p4/M, z5.b, z17.b\n"
-    "smax z4.b, p4/M, z4.b, z16.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
+    "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
+    "smax z22.b, p0/M, z22.b, z30.b\n"
+    "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
+    "smax z21.b, p0/M, z21.b, z27.b\n"
+    "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
+    "smax z7.b, p0/M, z7.b, z18.b\n"
+    "smax z6.b, p0/M, z6.b, z17.b\n"
+    "smax z5.b, p0/M, z5.b, z16.b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "smax z7.b, p4/M, z7.b, z3.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "smax z6.b, p4/M, z6.b, z31.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "smax z5.b, p4/M, z5.b, z28.b\n"
-    "smax z4.b, p4/M, z4.b, z16.b\n"
+    "smax z8.b, p0/M, z8.b, z4.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "smax z7.b, p0/M, z7.b, z0.b\n"
+    "smax z6.b, p0/M, z6.b, z29.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "smax z5.b, p0/M, z5.b, z26.b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "st1b { z8.b }, p4, [%x[outptr], x28]\n"
     "incb x28, ALL, MUL #4\n"
-    "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+    "st1b { z7.b }, p3, [%x[outptr], x27]\n"
     "incb x27, ALL, MUL #4\n"
-    "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+    "st1b { z6.b }, p2, [%x[outptr], x26]\n"
     "incb x26, ALL, MUL #4\n"
-    "st1b { z4.b }, p0, [%x[outptr], x25]\n"
+    "st1b { z5.b }, p1, [%x[outptr], x25]\n"
     "incb x25, ALL, MUL #4\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z7.b, #0x80\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
     "add x19, x19, #0x20\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "smax z7.b, p4/M, z7.b, z19.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "smax z7.b, p4/M, z7.b, z19.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "smax z7.b, p4/M, z7.b, z3.b\n"
+    "smax z8.b, p0/M, z8.b, z4.b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "st1b { z8.b }, p4, [%x[outptr], x28]\n"
     "incb x28\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
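
Functionally the generic max kernel is unchanged by this patch: the rewrite moves the all-true predicate to p0, shifts the first accumulator from z7 to z8, and re-orders loads to hide latency. For reference, a scalar sketch of what the kernel computes, omitting the unused leading window_cells argument of the real signature:

#include <algorithm>
#include <cstdint>

// Scalar sketch: element-wise max across n_valid_cells rows of n_channels
// int8 values, accumulator seeded at -128 (the "mov z8.b, #0x80" above).
// The vector kernel additionally unrolls over four SVE vectors of channels
// and four cells per iteration; this keeps only the semantics.
static void max_generic_reference(
  uint64_t n_valid_cells, uint64_t n_channels,
  const int8_t *const *inptrs, int8_t *outptr)
{
  for (uint64_t c = 0; c < n_channels; c++)
  {
    int8_t acc = -128;
    for (uint64_t cell = 0; cell < n_valid_cells; cell++)
    {
      acc = std::max(acc, inptrs[cell][c]);
    }
    outptr[c] = acc;
  }
}
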
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp
index 637940e..6f34faa 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,29 +26,21 @@
 
 #pragma once
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
 
 void sve_s8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
 
-struct sve_s8q_nhwc_avg_generic_depthfirst
+struct sve_s8q_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = sve_s8q_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>;
   sve_s8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_s8q_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index 75be96e..c431fec 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,11 +24,12 @@
 
 #include "pooling.hpp"
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
@@ -86,12 +87,13 @@
       f_rescale_value *= 2.0f;
     }
 
-    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
-    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
   // Combine together the rescale value for the requantization and the scaling
@@ -112,21 +114,21 @@
   );
 
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x26, #0x0\n"
     "cntb x25\n"
     "cntb x24, ALL, MUL #2\n"
     "cntb x23, ALL, MUL #3\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
-    "whilelt p2.b, x25, %x[n_channels]\n"
-    "whilelt p1.b, x24, %x[n_channels]\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z15.s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z14.s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z13.s, #0x0\n"
     "mov z12.s, #0x0\n"
     "mov z11.s, #0x0\n"
@@ -143,41 +145,41 @@
     "mov z0.s, #0x0\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x455c03b5  // saddlb z21.h, z29.b, z28.b\n"
     ".inst 0x455c07b4  // saddlt z20.h, z29.b, z28.b\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x455a0373  // saddlb z19.h, z27.b, z26.b\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
     ".inst 0x455a0772  // saddlt z18.h, z27.b, z26.b\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
     ".inst 0x45580331  // saddlb z17.h, z25.b, z24.b\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
     ".inst 0x45580730  // saddlt z16.h, z25.b, z24.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
     ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
     ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
     ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
     ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
     ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
     ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
@@ -221,173 +223,173 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508a3b5  // sshllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508a7b4  // sshllt z20.h, z29.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508a373  // sshllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508a772  // sshllt z18.h, z27.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
-    ".inst 0x4508a3b0  // sshllb z16.h, z29.b, #0x0\n"
-    ".inst 0x4590416b  // saddwb z11.s, z11.s, z16.h\n"
-    ".inst 0x4590454a  // saddwt z10.s, z10.s, z16.h\n"
-    ".inst 0x4508a7b0  // sshllt z16.h, z29.b, #0x0\n"
-    ".inst 0x45904129  // saddwb z9.s, z9.s, z16.h\n"
-    ".inst 0x45904508  // saddwt z8.s, z8.s, z16.h\n"
-    ".inst 0x4508a370  // sshllb z16.h, z27.b, #0x0\n"
-    ".inst 0x459040e7  // saddwb z7.s, z7.s, z16.h\n"
-    ".inst 0x459044c6  // saddwt z6.s, z6.s, z16.h\n"
-    ".inst 0x4508a770  // sshllt z16.h, z27.b, #0x0\n"
-    ".inst 0x459040a5  // saddwb z5.s, z5.s, z16.h\n"
-    ".inst 0x45904484  // saddwt z4.s, z4.s, z16.h\n"
-    ".inst 0x4508a330  // sshllb z16.h, z25.b, #0x0\n"
-    ".inst 0x45904063  // saddwb z3.s, z3.s, z16.h\n"
-    ".inst 0x45904442  // saddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508a331  // sshllb z17.h, z25.b, #0x0\n"
     ".inst 0x4508a730  // sshllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595416b  // saddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x4595454a  // saddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944129  // saddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944508  // saddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459340e7  // saddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x459344c6  // saddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459240a5  // saddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924484  // saddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914063  // saddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914442  // saddwt z2.s, z2.s, z17.h\n"
     ".inst 0x45904021  // saddwb z1.s, z1.s, z16.h\n"
     ".inst 0x45904400  // saddwt z0.s, z0.s, z16.h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "mov z20.s, #0x7f\n"
-    "ld1rw { z18.s }, p4/Z, [%x[combined_rescale_value]]\n"
-    "ld1rw { z17.s }, p4/Z, [%x[left_shift]]\n"
-    "not z19.s, p4/M, z20.s\n"
-    "ld1rw { z16.s }, p4/Z, [%x[right_shift]]\n"
-    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
-    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
-    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
-    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
-    ".inst 0x4482922b  // srshl z11.s, p4/M, z11.s, z17.s\n"
-    ".inst 0x04b275ef  // sqrdmulh z15.s, z15.s, z18.s\n"
-    ".inst 0x04b275ce  // sqrdmulh z14.s, z14.s, z18.s\n"
-    ".inst 0x04b275ad  // sqrdmulh z13.s, z13.s, z18.s\n"
-    ".inst 0x04b2758c  // sqrdmulh z12.s, z12.s, z18.s\n"
-    ".inst 0x04b2756b  // sqrdmulh z11.s, z11.s, z18.s\n"
-    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
-    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
-    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
-    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
-    ".inst 0x4482920b  // srshl z11.s, p4/M, z11.s, z16.s\n"
-    ".inst 0x4482922a  // srshl z10.s, p4/M, z10.s, z17.s\n"
-    ".inst 0x44829229  // srshl z9.s, p4/M, z9.s, z17.s\n"
-    ".inst 0x44829228  // srshl z8.s, p4/M, z8.s, z17.s\n"
-    ".inst 0x44829227  // srshl z7.s, p4/M, z7.s, z17.s\n"
-    ".inst 0x04b2754a  // sqrdmulh z10.s, z10.s, z18.s\n"
-    ".inst 0x04b27529  // sqrdmulh z9.s, z9.s, z18.s\n"
-    ".inst 0x04b27508  // sqrdmulh z8.s, z8.s, z18.s\n"
-    ".inst 0x04b274e7  // sqrdmulh z7.s, z7.s, z18.s\n"
-    ".inst 0x4482920a  // srshl z10.s, p4/M, z10.s, z16.s\n"
-    ".inst 0x44829209  // srshl z9.s, p4/M, z9.s, z16.s\n"
-    ".inst 0x44829208  // srshl z8.s, p4/M, z8.s, z16.s\n"
-    ".inst 0x44829207  // srshl z7.s, p4/M, z7.s, z16.s\n"
-    ".inst 0x44829226  // srshl z6.s, p4/M, z6.s, z17.s\n"
-    ".inst 0x44829225  // srshl z5.s, p4/M, z5.s, z17.s\n"
-    ".inst 0x44829224  // srshl z4.s, p4/M, z4.s, z17.s\n"
-    ".inst 0x44829223  // srshl z3.s, p4/M, z3.s, z17.s\n"
-    ".inst 0x04b274c6  // sqrdmulh z6.s, z6.s, z18.s\n"
-    ".inst 0x04b274a5  // sqrdmulh z5.s, z5.s, z18.s\n"
-    ".inst 0x04b27484  // sqrdmulh z4.s, z4.s, z18.s\n"
-    ".inst 0x04b27463  // sqrdmulh z3.s, z3.s, z18.s\n"
-    ".inst 0x44829206  // srshl z6.s, p4/M, z6.s, z16.s\n"
-    ".inst 0x44829205  // srshl z5.s, p4/M, z5.s, z16.s\n"
-    ".inst 0x44829204  // srshl z4.s, p4/M, z4.s, z16.s\n"
-    ".inst 0x44829203  // srshl z3.s, p4/M, z3.s, z16.s\n"
-    ".inst 0x44829222  // srshl z2.s, p4/M, z2.s, z17.s\n"
-    ".inst 0x44829221  // srshl z1.s, p4/M, z1.s, z17.s\n"
-    ".inst 0x44829220  // srshl z0.s, p4/M, z0.s, z17.s\n"
-    "smax z15.s, p4/M, z15.s, z19.s\n"
-    ".inst 0x04b27442  // sqrdmulh z2.s, z2.s, z18.s\n"
-    ".inst 0x04b27421  // sqrdmulh z1.s, z1.s, z18.s\n"
-    ".inst 0x04b27400  // sqrdmulh z0.s, z0.s, z18.s\n"
-    "smin z15.s, p4/M, z15.s, z20.s\n"
-    ".inst 0x44829202  // srshl z2.s, p4/M, z2.s, z16.s\n"
-    ".inst 0x44829201  // srshl z1.s, p4/M, z1.s, z16.s\n"
-    ".inst 0x44829200  // srshl z0.s, p4/M, z0.s, z16.s\n"
-    "smax z14.s, p4/M, z14.s, z19.s\n"
-    "smax z13.s, p4/M, z13.s, z19.s\n"
-    "smax z12.s, p4/M, z12.s, z19.s\n"
-    "smax z11.s, p4/M, z11.s, z19.s\n"
-    "smin z14.s, p4/M, z14.s, z20.s\n"
-    "smin z13.s, p4/M, z13.s, z20.s\n"
-    "smin z12.s, p4/M, z12.s, z20.s\n"
-    "smin z11.s, p4/M, z11.s, z20.s\n"
+    "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
+    "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x4482824f  // srshl z15.s, p0/M, z15.s, z18.s\n"
+    ".inst 0x4482824e  // srshl z14.s, p0/M, z14.s, z18.s\n"
+    ".inst 0x4482824d  // srshl z13.s, p0/M, z13.s, z18.s\n"
+    ".inst 0x4482824c  // srshl z12.s, p0/M, z12.s, z18.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x4482824b  // srshl z11.s, p0/M, z11.s, z18.s\n"
+    ".inst 0x4482824a  // srshl z10.s, p0/M, z10.s, z18.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x44828249  // srshl z9.s, p0/M, z9.s, z18.s\n"
+    ".inst 0x44828248  // srshl z8.s, p0/M, z8.s, z18.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x04b1756b  // sqrdmulh z11.s, z11.s, z17.s\n"
+    ".inst 0x44828247  // srshl z7.s, p0/M, z7.s, z18.s\n"
+    ".inst 0x44828246  // srshl z6.s, p0/M, z6.s, z18.s\n"
+    ".inst 0x04b1754a  // sqrdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x04b17529  // sqrdmulh z9.s, z9.s, z17.s\n"
+    ".inst 0x44828245  // srshl z5.s, p0/M, z5.s, z18.s\n"
+    ".inst 0x44828244  // srshl z4.s, p0/M, z4.s, z18.s\n"
+    ".inst 0x04b17508  // sqrdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x04b174e7  // sqrdmulh z7.s, z7.s, z17.s\n"
+    ".inst 0x44828243  // srshl z3.s, p0/M, z3.s, z18.s\n"
+    ".inst 0x44828242  // srshl z2.s, p0/M, z2.s, z18.s\n"
+    ".inst 0x04b174c6  // sqrdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x04b174a5  // sqrdmulh z5.s, z5.s, z17.s\n"
+    ".inst 0x44828241  // srshl z1.s, p0/M, z1.s, z18.s\n"
+    ".inst 0x44828240  // srshl z0.s, p0/M, z0.s, z18.s\n"
+    ".inst 0x04b17484  // sqrdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x04b17463  // sqrdmulh z3.s, z3.s, z17.s\n"
+    ".inst 0x04b17442  // sqrdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x04b17421  // sqrdmulh z1.s, z1.s, z17.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x04b17400  // sqrdmulh z0.s, z0.s, z17.s\n"
+    "mov z18.s, #0x7f\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    ".inst 0x4482820b  // srshl z11.s, p0/M, z11.s, z16.s\n"
+    ".inst 0x4482820a  // srshl z10.s, p0/M, z10.s, z16.s\n"
+    ".inst 0x44828209  // srshl z9.s, p0/M, z9.s, z16.s\n"
+    ".inst 0x44828208  // srshl z8.s, p0/M, z8.s, z16.s\n"
+    ".inst 0x44828207  // srshl z7.s, p0/M, z7.s, z16.s\n"
+    ".inst 0x44828206  // srshl z6.s, p0/M, z6.s, z16.s\n"
+    ".inst 0x44828205  // srshl z5.s, p0/M, z5.s, z16.s\n"
+    ".inst 0x44828204  // srshl z4.s, p0/M, z4.s, z16.s\n"
+    ".inst 0x44828203  // srshl z3.s, p0/M, z3.s, z16.s\n"
+    ".inst 0x44828202  // srshl z2.s, p0/M, z2.s, z16.s\n"
+    ".inst 0x44828201  // srshl z1.s, p0/M, z1.s, z16.s\n"
+    ".inst 0x44828200  // srshl z0.s, p0/M, z0.s, z16.s\n"
+    "not z16.s, p0/M, z18.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
-    "smax z10.s, p4/M, z10.s, z19.s\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
+    "smin z11.s, p0/M, z11.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
-    "smax z9.s, p4/M, z9.s, z19.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
-    "smin z10.s, p4/M, z10.s, z20.s\n"
-    "incb x26, ALL, MUL #4\n"
-    "smin z9.s, p4/M, z9.s, z20.s\n"
-    "smax z8.s, p4/M, z8.s, z19.s\n"
-    "smax z7.s, p4/M, z7.s, z19.s\n"
-    "smax z6.s, p4/M, z6.s, z19.s\n"
-    "trn1 z18.h, z11.h, z10.h\n"
-    "smin z8.s, p4/M, z8.s, z20.s\n"
-    "smin z7.s, p4/M, z7.s, z20.s\n"
-    "smin z6.s, p4/M, z6.s, z20.s\n"
-    "smax z5.s, p4/M, z5.s, z19.s\n"
+    "smin z10.s, p0/M, z10.s, z18.s\n"
+    "smin z9.s, p0/M, z9.s, z18.s\n"
+    "trn1 z17.h, z11.h, z10.h\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "smin z8.s, p0/M, z8.s, z18.s\n"
+    "smin z7.s, p0/M, z7.s, z18.s\n"
     "trn1 z16.h, z9.h, z8.h\n"
-    "smax z4.s, p4/M, z4.s, z19.s\n"
-    "trn1 z17.h, z7.h, z6.h\n"
-    "trn1 z16.b, z18.b, z16.b\n"
-    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
-    "smin z5.s, p4/M, z5.s, z20.s\n"
-    "incb x25, ALL, MUL #4\n"
-    "smin z4.s, p4/M, z4.s, z20.s\n"
-    "smax z3.s, p4/M, z3.s, z19.s\n"
-    "smax z2.s, p4/M, z2.s, z19.s\n"
-    "smax z1.s, p4/M, z1.s, z19.s\n"
-    "smax z0.s, p4/M, z0.s, z19.s\n"
-    "trn1 z16.h, z5.h, z4.h\n"
-    "smin z3.s, p4/M, z3.s, z20.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
-    "smin z2.s, p4/M, z2.s, z20.s\n"
-    "incb x24, ALL, MUL #4\n"
-    "smin z1.s, p4/M, z1.s, z20.s\n"
-    "smin z0.s, p4/M, z0.s, z20.s\n"
+    "smin z6.s, p0/M, z6.s, z18.s\n"
+    "smin z5.s, p0/M, z5.s, z18.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "st1b { z16.b }, p3, [%x[outptr], x25]\n"
+    "smin z4.s, p0/M, z4.s, z18.s\n"
+    "smin z3.s, p0/M, z3.s, z18.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "smin z2.s, p0/M, z2.s, z18.s\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
     "trn1 z17.h, z3.h, z2.h\n"
+    "st1b { z16.b }, p2, [%x[outptr], x24]\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
     "trn1 z16.h, z1.h, z0.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
     "incb x23, ALL, MUL #4\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "incb x26, ALL, MUL #4\n"
+    "incb x25, ALL, MUL #4\n"
+    "incb x24, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z15.s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z14.s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z13.s, #0x0\n"
     "mov z12.s, #0x0\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e07f6  // saddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
     ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
     ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     ".inst 0x455e03f7  // saddlb z23.h, z31.b, z30.b\n"
@@ -401,50 +403,49 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508a3f7  // sshllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508a7f6  // sshllt z22.h, z31.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508a3f1  // sshllb z17.h, z31.b, #0x0\n"
-    ".inst 0x4508a7f0  // sshllt z16.h, z31.b, #0x0\n"
-    ".inst 0x459141ef  // saddwb z15.s, z15.s, z17.h\n"
-    ".inst 0x459145ce  // saddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459041ad  // saddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x4590458c  // saddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x459741ef  // saddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x459745ce  // saddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459641ad  // saddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x4596458c  // saddwt z12.s, z12.s, z22.h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "mov z20.s, #0x7f\n"
-    "ld1rw { z18.s }, p4/Z, [%x[combined_rescale_value]]\n"
-    "ld1rw { z17.s }, p4/Z, [%x[left_shift]]\n"
-    "not z19.s, p4/M, z20.s\n"
-    "ld1rw { z16.s }, p4/Z, [%x[right_shift]]\n"
-    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
-    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
-    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
-    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
-    ".inst 0x04b275ef  // sqrdmulh z15.s, z15.s, z18.s\n"
-    ".inst 0x04b275ce  // sqrdmulh z14.s, z14.s, z18.s\n"
-    ".inst 0x04b275ad  // sqrdmulh z13.s, z13.s, z18.s\n"
-    ".inst 0x04b2758c  // sqrdmulh z12.s, z12.s, z18.s\n"
-    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
-    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
-    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
-    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
-    "smax z15.s, p4/M, z15.s, z19.s\n"
-    "smax z14.s, p4/M, z14.s, z19.s\n"
-    "smax z13.s, p4/M, z13.s, z19.s\n"
-    "smax z12.s, p4/M, z12.s, z19.s\n"
-    "smin z15.s, p4/M, z15.s, z20.s\n"
-    "smin z14.s, p4/M, z14.s, z20.s\n"
-    "smin z13.s, p4/M, z13.s, z20.s\n"
-    "smin z12.s, p4/M, z12.s, z20.s\n"
+    "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
+    "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x4482824f  // srshl z15.s, p0/M, z15.s, z18.s\n"
+    ".inst 0x4482824e  // srshl z14.s, p0/M, z14.s, z18.s\n"
+    ".inst 0x4482824d  // srshl z13.s, p0/M, z13.s, z18.s\n"
+    ".inst 0x4482824c  // srshl z12.s, p0/M, z12.s, z18.s\n"
+    "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x04b175ef  // sqrdmulh z15.s, z15.s, z17.s\n"
+    ".inst 0x04b175ce  // sqrdmulh z14.s, z14.s, z17.s\n"
+    ".inst 0x04b175ad  // sqrdmulh z13.s, z13.s, z17.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x04b1758c  // sqrdmulh z12.s, z12.s, z17.s\n"
+    "mov z18.s, #0x7f\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    "not z16.s, p0/M, z18.s\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
     "incb x26\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [right_shift] "r" (&right_shift)
     : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -454,4 +455,4 @@
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
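
Note: the requantisation tail in the quantised pooling kernels above (SRSHL by the left shift, SQRDMULH by the combined rescale value, SRSHL by the right shift, then clamp to [-128, 127] before narrowing with TRN1) follows the usual fixed-point scheme. Below is a minimal scalar sketch of that arithmetic, assuming 64-bit intermediates and an arithmetic right shift on signed values; the function name is illustrative and not part of the library.

#include <cstdint>

// Illustrative scalar model only; omits the SQRDMULH saturation corner case
// (both operands equal to INT32_MIN).  requantize_s8 is a hypothetical name.
static inline int8_t requantize_s8(int32_t acc,
                                   int left_shift,            // >= 0
                                   int32_t combined_rescale,  // Q31 multiplier
                                   int right_shift)           // <= 0, as passed to SRSHL
{
  int64_t v = static_cast<int64_t>(acc) << left_shift;

  // Rounding doubling-high multiply: round(v * combined_rescale / 2^31).
  v = (v * combined_rescale + (1ll << 30)) >> 31;

  // SRSHL with a negative shift amount performs a rounding shift right.
  const int shift = -right_shift;
  if (shift > 0)
  {
    v = (v + (1ll << (shift - 1))) >> shift;
  }

  // Clamp to the signed 8-bit output range before narrowing.
  if (v < -128) v = -128;
  if (v > 127)  v = 127;
  return static_cast<int8_t>(v);
}
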
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp
index 5aced30..fc06ed0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,29 +26,21 @@
 
 #pragma once
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
 
 void sve_s8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
 
-struct sve_s8q_nhwc_max_generic_depthfirst
+struct sve_s8q_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>
 {
-  typedef int8_t operand_type;
-  typedef int8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const int8_t *const *const inptrs, int8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = sve_s8q_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<int8_t, int8_t, Requantize32>;
   sve_s8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_s8q_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
index 7f00d46..5ef1414 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,8 +24,9 @@
 
 #include "pooling.hpp"
 #include <cstdint>
+#include <cstddef>
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
@@ -41,339 +42,338 @@
 )
 {
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x28, #0x0\n"
     "cntb x27\n"
     "cntb x26, ALL, MUL #2\n"
     "cntb x25, ALL, MUL #3\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
-    "whilelt p2.b, x27, %x[n_channels]\n"
-    "whilelt p1.b, x26, %x[n_channels]\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z8.b, #0x80\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z7.b, #0x80\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.b, #0x80\n"
+    "mov z7.b, #0x80\n"
+    "mov x19, %x[inptrs]\n"
     "mov z6.b, #0x80\n"
     "mov z5.b, #0x80\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
+    "smax z22.b, p0/M, z22.b, z30.b\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
+    "smax z21.b, p0/M, z21.b, z27.b\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "subs x24, x24, #0x1\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "smax z7.b, p0/M, z7.b, z18.b\n"
+    "smax z6.b, p0/M, z6.b, z17.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "smax z5.b, p0/M, z5.b, z16.b\n"
     "add x19, x19, #0x20\n"
-    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "smax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "smax z21.b, p4/M, z21.b, z26.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "smax z16.b, p4/M, z16.b, z25.b\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "smax z20.b, p4/M, z20.b, z24.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "smax z18.b, p4/M, z18.b, z22.b\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "smax z17.b, p4/M, z17.b, z21.b\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "smax z16.b, p4/M, z16.b, z20.b\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "smax z8.b, p4/M, z8.b, z19.b\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "smax z7.b, p4/M, z7.b, z18.b\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "smax z6.b, p4/M, z6.b, z17.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "smax z5.b, p4/M, z5.b, z16.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
-    "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
-    "smax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
-    "smax z21.b, p4/M, z21.b, z26.b\n"
-    "smax z16.b, p4/M, z16.b, z25.b\n"
-    "smax z20.b, p4/M, z20.b, z24.b\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "smax z18.b, p4/M, z18.b, z22.b\n"
-    "smax z17.b, p4/M, z17.b, z21.b\n"
-    "smax z16.b, p4/M, z16.b, z20.b\n"
-    "smax z8.b, p4/M, z8.b, z19.b\n"
-    "smax z7.b, p4/M, z7.b, z18.b\n"
-    "smax z6.b, p4/M, z6.b, z17.b\n"
-    "smax z5.b, p4/M, z5.b, z16.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
+    "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
+    "smax z22.b, p0/M, z22.b, z30.b\n"
+    "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
+    "smax z21.b, p0/M, z21.b, z27.b\n"
+    "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
+    "smax z20.b, p0/M, z20.b, z24.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z18.b, p0/M, z18.b, z22.b\n"
+    "smax z17.b, p0/M, z17.b, z21.b\n"
+    "smax z16.b, p0/M, z16.b, z20.b\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
+    "smax z7.b, p0/M, z7.b, z18.b\n"
+    "smax z6.b, p0/M, z6.b, z17.b\n"
+    "smax z5.b, p0/M, z5.b, z16.b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "smax z8.b, p4/M, z8.b, z3.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "smax z7.b, p4/M, z7.b, z31.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "smax z6.b, p4/M, z6.b, z28.b\n"
-    "smax z5.b, p4/M, z5.b, z16.b\n"
+    "smax z8.b, p0/M, z8.b, z4.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "smax z7.b, p0/M, z7.b, z0.b\n"
+    "smax z6.b, p0/M, z6.b, z29.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "smax z5.b, p0/M, z5.b, z26.b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "mov z4.s, #0x7f\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "ld1rw { z3.s }, p4/Z, [x19]\n"
     ".inst 0x4508a111  // sshllb z17.h, z8.b, #0x0\n"
+    ".inst 0x4508a517  // sshllt z23.h, z8.b, #0x0\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    ".inst 0x4508a510  // sshllt z16.h, z8.b, #0x0\n"
-    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a0f6  // sshllb z22.h, z7.b, #0x0\n"
+    ".inst 0x4508a4f5  // sshllt z21.h, z7.b, #0x0\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a0d4  // sshllb z20.h, z6.b, #0x0\n"
+    ".inst 0x4508a4d3  // sshllt z19.h, z6.b, #0x0\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    ".inst 0x4508a0f2  // sshllb z18.h, z7.b, #0x0\n"
-    "ld1rw { z1.s }, p4/Z, [x19]\n"
-    ".inst 0x4508a4f7  // sshllt z23.h, z7.b, #0x0\n"
-    ".inst 0x4508a0d6  // sshllb z22.h, z6.b, #0x0\n"
-    ".inst 0x4508a4d5  // sshllt z21.h, z6.b, #0x0\n"
-    ".inst 0x4508a0b4  // sshllb z20.h, z5.b, #0x0\n"
-    ".inst 0x4508a4b3  // sshllt z19.h, z5.b, #0x0\n"
-    ".inst 0x4510a220  // sshllb z0.s, z17.h, #0x0\n"
+    "ld1rw { z2.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a0b2  // sshllb z18.h, z5.b, #0x0\n"
+    ".inst 0x4508a4b0  // sshllt z16.h, z5.b, #0x0\n"
+    ".inst 0x4510a221  // sshllb z1.s, z17.h, #0x0\n"
     ".inst 0x4510a631  // sshllt z17.s, z17.h, #0x0\n"
-    ".inst 0x4510a21f  // sshllb z31.s, z16.h, #0x0\n"
-    ".inst 0x4510a610  // sshllt z16.s, z16.h, #0x0\n"
-    ".inst 0x4510a25e  // sshllb z30.s, z18.h, #0x0\n"
-    ".inst 0x4510a652  // sshllt z18.s, z18.h, #0x0\n"
-    ".inst 0x4510a2fd  // sshllb z29.s, z23.h, #0x0\n"
-    ".inst 0x4510a6fc  // sshllt z28.s, z23.h, #0x0\n"
-    ".inst 0x4510a2db  // sshllb z27.s, z22.h, #0x0\n"
-    ".inst 0x4510a6da  // sshllt z26.s, z22.h, #0x0\n"
-    ".inst 0x4510a2b9  // sshllb z25.s, z21.h, #0x0\n"
-    ".inst 0x4510a6b8  // sshllt z24.s, z21.h, #0x0\n"
-    ".inst 0x4510a297  // sshllb z23.s, z20.h, #0x0\n"
-    ".inst 0x4510a696  // sshllt z22.s, z20.h, #0x0\n"
-    ".inst 0x4510a275  // sshllb z21.s, z19.h, #0x0\n"
-    ".inst 0x4510a674  // sshllt z20.s, z19.h, #0x0\n"
-    ".inst 0x44829040  // srshl z0.s, p4/M, z0.s, z2.s\n"
-    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
-    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"
-    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
-    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
+    ".inst 0x44828081  // srshl z1.s, p0/M, z1.s, z4.s\n"
+    ".inst 0x44828091  // srshl z17.s, p0/M, z17.s, z4.s\n"
+    ".inst 0x4510a2e0  // sshllb z0.s, z23.h, #0x0\n"
+    ".inst 0x4510a6ff  // sshllt z31.s, z23.h, #0x0\n"
+    ".inst 0x44828080  // srshl z0.s, p0/M, z0.s, z4.s\n"
+    ".inst 0x4482809f  // srshl z31.s, p0/M, z31.s, z4.s\n"
+    ".inst 0x4510a2de  // sshllb z30.s, z22.h, #0x0\n"
+    ".inst 0x4510a6dd  // sshllt z29.s, z22.h, #0x0\n"
+    ".inst 0x4482809e  // srshl z30.s, p0/M, z30.s, z4.s\n"
+    ".inst 0x4482809d  // srshl z29.s, p0/M, z29.s, z4.s\n"
+    ".inst 0x4510a2bc  // sshllb z28.s, z21.h, #0x0\n"
+    ".inst 0x4510a6bb  // sshllt z27.s, z21.h, #0x0\n"
+    ".inst 0x4482809c  // srshl z28.s, p0/M, z28.s, z4.s\n"
+    ".inst 0x4482809b  // srshl z27.s, p0/M, z27.s, z4.s\n"
+    ".inst 0x4510a29a  // sshllb z26.s, z20.h, #0x0\n"
+    ".inst 0x4510a699  // sshllt z25.s, z20.h, #0x0\n"
+    ".inst 0x4482809a  // srshl z26.s, p0/M, z26.s, z4.s\n"
+    ".inst 0x44828099  // srshl z25.s, p0/M, z25.s, z4.s\n"
+    ".inst 0x4510a278  // sshllb z24.s, z19.h, #0x0\n"
+    ".inst 0x4510a677  // sshllt z23.s, z19.h, #0x0\n"
+    ".inst 0x44828098  // srshl z24.s, p0/M, z24.s, z4.s\n"
+    ".inst 0x44828097  // srshl z23.s, p0/M, z23.s, z4.s\n"
+    ".inst 0x4510a256  // sshllb z22.s, z18.h, #0x0\n"
+    ".inst 0x4510a655  // sshllt z21.s, z18.h, #0x0\n"
+    ".inst 0x44828096  // srshl z22.s, p0/M, z22.s, z4.s\n"
+    ".inst 0x44828095  // srshl z21.s, p0/M, z21.s, z4.s\n"
+    ".inst 0x4510a214  // sshllb z20.s, z16.h, #0x0\n"
+    ".inst 0x4510a613  // sshllt z19.s, z16.h, #0x0\n"
+    ".inst 0x44828094  // srshl z20.s, p0/M, z20.s, z4.s\n"
+    ".inst 0x44828093  // srshl z19.s, p0/M, z19.s, z4.s\n"
+    ".inst 0x04a37421  // sqrdmulh z1.s, z1.s, z3.s\n"
     ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    ".inst 0x44828041  // srshl z1.s, p0/M, z1.s, z2.s\n"
+    ".inst 0x44828051  // srshl z17.s, p0/M, z17.s, z2.s\n"
+    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
     ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
-    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
-    ".inst 0x44829020  // srshl z0.s, p4/M, z0.s, z1.s\n"
-    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
-    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
-    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
-    ".inst 0x4482905e  // srshl z30.s, p4/M, z30.s, z2.s\n"
-    ".inst 0x44829052  // srshl z18.s, p4/M, z18.s, z2.s\n"
-    ".inst 0x4482905d  // srshl z29.s, p4/M, z29.s, z2.s\n"
-    ".inst 0x4482905c  // srshl z28.s, p4/M, z28.s, z2.s\n"
+    ".inst 0x44828040  // srshl z0.s, p0/M, z0.s, z2.s\n"
+    ".inst 0x4482805f  // srshl z31.s, p0/M, z31.s, z2.s\n"
     ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
-    ".inst 0x04a37652  // sqrdmulh z18.s, z18.s, z3.s\n"
     ".inst 0x04a377bd  // sqrdmulh z29.s, z29.s, z3.s\n"
+    ".inst 0x4482805e  // srshl z30.s, p0/M, z30.s, z2.s\n"
+    ".inst 0x4482805d  // srshl z29.s, p0/M, z29.s, z2.s\n"
     ".inst 0x04a3779c  // sqrdmulh z28.s, z28.s, z3.s\n"
-    ".inst 0x4482903e  // srshl z30.s, p4/M, z30.s, z1.s\n"
-    ".inst 0x44829032  // srshl z18.s, p4/M, z18.s, z1.s\n"
-    ".inst 0x4482903d  // srshl z29.s, p4/M, z29.s, z1.s\n"
-    ".inst 0x4482903c  // srshl z28.s, p4/M, z28.s, z1.s\n"
-    ".inst 0x4482905b  // srshl z27.s, p4/M, z27.s, z2.s\n"
-    ".inst 0x4482905a  // srshl z26.s, p4/M, z26.s, z2.s\n"
-    ".inst 0x44829059  // srshl z25.s, p4/M, z25.s, z2.s\n"
-    ".inst 0x44829058  // srshl z24.s, p4/M, z24.s, z2.s\n"
     ".inst 0x04a3777b  // sqrdmulh z27.s, z27.s, z3.s\n"
+    ".inst 0x4482805c  // srshl z28.s, p0/M, z28.s, z2.s\n"
+    ".inst 0x4482805b  // srshl z27.s, p0/M, z27.s, z2.s\n"
     ".inst 0x04a3775a  // sqrdmulh z26.s, z26.s, z3.s\n"
     ".inst 0x04a37739  // sqrdmulh z25.s, z25.s, z3.s\n"
+    ".inst 0x4482805a  // srshl z26.s, p0/M, z26.s, z2.s\n"
+    ".inst 0x44828059  // srshl z25.s, p0/M, z25.s, z2.s\n"
     ".inst 0x04a37718  // sqrdmulh z24.s, z24.s, z3.s\n"
-    ".inst 0x4482903b  // srshl z27.s, p4/M, z27.s, z1.s\n"
-    ".inst 0x4482903a  // srshl z26.s, p4/M, z26.s, z1.s\n"
-    ".inst 0x44829039  // srshl z25.s, p4/M, z25.s, z1.s\n"
-    ".inst 0x44829038  // srshl z24.s, p4/M, z24.s, z1.s\n"
-    ".inst 0x44829057  // srshl z23.s, p4/M, z23.s, z2.s\n"
-    ".inst 0x44829056  // srshl z22.s, p4/M, z22.s, z2.s\n"
-    ".inst 0x44829055  // srshl z21.s, p4/M, z21.s, z2.s\n"
-    ".inst 0x44829054  // srshl z20.s, p4/M, z20.s, z2.s\n"
     ".inst 0x04a376f7  // sqrdmulh z23.s, z23.s, z3.s\n"
+    ".inst 0x44828058  // srshl z24.s, p0/M, z24.s, z2.s\n"
+    ".inst 0x44828057  // srshl z23.s, p0/M, z23.s, z2.s\n"
     ".inst 0x04a376d6  // sqrdmulh z22.s, z22.s, z3.s\n"
     ".inst 0x04a376b5  // sqrdmulh z21.s, z21.s, z3.s\n"
+    ".inst 0x44828056  // srshl z22.s, p0/M, z22.s, z2.s\n"
+    ".inst 0x44828055  // srshl z21.s, p0/M, z21.s, z2.s\n"
     ".inst 0x04a37694  // sqrdmulh z20.s, z20.s, z3.s\n"
-    ".inst 0x44829037  // srshl z23.s, p4/M, z23.s, z1.s\n"
-    ".inst 0x44829036  // srshl z22.s, p4/M, z22.s, z1.s\n"
-    ".inst 0x44829035  // srshl z21.s, p4/M, z21.s, z1.s\n"
-    ".inst 0x44829034  // srshl z20.s, p4/M, z20.s, z1.s\n"
-    "not z19.s, p4/M, z4.s\n"
-    "smax z0.s, p4/M, z0.s, z19.s\n"
-    "smax z17.s, p4/M, z17.s, z19.s\n"
-    "smax z31.s, p4/M, z31.s, z19.s\n"
-    "smax z16.s, p4/M, z16.s, z19.s\n"
-    "smin z0.s, p4/M, z0.s, z4.s\n"
-    "smin z17.s, p4/M, z17.s, z4.s\n"
-    "smin z31.s, p4/M, z31.s, z4.s\n"
-    "smin z16.s, p4/M, z16.s, z4.s\n"
-    "smax z30.s, p4/M, z30.s, z19.s\n"
-    "trn1 z17.h, z0.h, z17.h\n"
-    "smax z18.s, p4/M, z18.s, z19.s\n"
-    "trn1 z16.h, z31.h, z16.h\n"
-    "smin z30.s, p4/M, z30.s, z4.s\n"
+    ".inst 0x04a37673  // sqrdmulh z19.s, z19.s, z3.s\n"
+    ".inst 0x44828054  // srshl z20.s, p0/M, z20.s, z2.s\n"
+    ".inst 0x44828053  // srshl z19.s, p0/M, z19.s, z2.s\n"
+    "mov z18.s, #0x7f\n"
+    "not z16.s, p0/M, z18.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z17.s, p0/M, z17.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "smax z30.s, p0/M, z30.s, z16.s\n"
+    "smax z29.s, p0/M, z29.s, z16.s\n"
+    "smax z28.s, p0/M, z28.s, z16.s\n"
+    "smax z27.s, p0/M, z27.s, z16.s\n"
+    "smax z26.s, p0/M, z26.s, z16.s\n"
+    "smax z25.s, p0/M, z25.s, z16.s\n"
+    "smax z24.s, p0/M, z24.s, z16.s\n"
+    "smax z23.s, p0/M, z23.s, z16.s\n"
+    "smax z22.s, p0/M, z22.s, z16.s\n"
+    "smax z21.s, p0/M, z21.s, z16.s\n"
+    "smax z20.s, p0/M, z20.s, z16.s\n"
+    "smax z19.s, p0/M, z19.s, z16.s\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
+    "smin z17.s, p0/M, z17.s, z18.s\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
+    "trn1 z17.h, z1.h, z17.h\n"
+    "smin z31.s, p0/M, z31.s, z18.s\n"
+    "smin z30.s, p0/M, z30.s, z18.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
-    "smin z18.s, p4/M, z18.s, z4.s\n"
-    "incb x28, ALL, MUL #4\n"
-    "smax z29.s, p4/M, z29.s, z19.s\n"
-    "smax z28.s, p4/M, z28.s, z19.s\n"
-    "smax z27.s, p4/M, z27.s, z19.s\n"
-    "smax z26.s, p4/M, z26.s, z19.s\n"
-    "trn1 z18.h, z30.h, z18.h\n"
-    "smin z29.s, p4/M, z29.s, z4.s\n"
-    "smin z28.s, p4/M, z28.s, z4.s\n"
-    "smin z27.s, p4/M, z27.s, z4.s\n"
-    "smin z26.s, p4/M, z26.s, z4.s\n"
-    "smax z25.s, p4/M, z25.s, z19.s\n"
-    "trn1 z16.h, z29.h, z28.h\n"
-    "smax z24.s, p4/M, z24.s, z19.s\n"
-    "trn1 z17.h, z27.h, z26.h\n"
-    "trn1 z16.b, z18.b, z16.b\n"
-    "st1b { z16.b }, p2, [%x[outptr], x27]\n"
-    "smin z25.s, p4/M, z25.s, z4.s\n"
-    "incb x27, ALL, MUL #4\n"
-    "smin z24.s, p4/M, z24.s, z4.s\n"
-    "smax z23.s, p4/M, z23.s, z19.s\n"
-    "smax z22.s, p4/M, z22.s, z19.s\n"
-    "smax z21.s, p4/M, z21.s, z19.s\n"
-    "smax z20.s, p4/M, z20.s, z19.s\n"
-    "trn1 z16.h, z25.h, z24.h\n"
-    "smin z23.s, p4/M, z23.s, z4.s\n"
+    "smin z29.s, p0/M, z29.s, z18.s\n"
+    "smin z28.s, p0/M, z28.s, z18.s\n"
+    "trn1 z17.h, z30.h, z29.h\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+    "smin z27.s, p0/M, z27.s, z18.s\n"
+    "smin z26.s, p0/M, z26.s, z18.s\n"
+    "trn1 z16.h, z28.h, z27.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p1, [%x[outptr], x26]\n"
-    "smin z22.s, p4/M, z22.s, z4.s\n"
-    "incb x26, ALL, MUL #4\n"
-    "smin z21.s, p4/M, z21.s, z4.s\n"
-    "smin z20.s, p4/M, z20.s, z4.s\n"
-    "trn1 z17.h, z23.h, z22.h\n"
-    "trn1 z16.h, z21.h, z20.h\n"
+    "smin z25.s, p0/M, z25.s, z18.s\n"
+    "smin z24.s, p0/M, z24.s, z18.s\n"
+    "trn1 z17.h, z26.h, z25.h\n"
+    "st1b { z16.b }, p3, [%x[outptr], x27]\n"
+    "smin z23.s, p0/M, z23.s, z18.s\n"
+    "smin z22.s, p0/M, z22.s, z18.s\n"
+    "trn1 z16.h, z24.h, z23.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p0, [%x[outptr], x25]\n"
+    "smin z21.s, p0/M, z21.s, z18.s\n"
+    "smin z20.s, p0/M, z20.s, z18.s\n"
+    "trn1 z17.h, z22.h, z21.h\n"
+    "st1b { z16.b }, p2, [%x[outptr], x26]\n"
+    "smin z19.s, p0/M, z19.s, z18.s\n"
+    "trn1 z16.h, z20.h, z19.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "st1b { z16.b }, p1, [%x[outptr], x25]\n"
     "incb x25, ALL, MUL #4\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "incb x28, ALL, MUL #4\n"
+    "incb x27, ALL, MUL #4\n"
+    "incb x26, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
+    "lsr x24, %x[n_valid_cells], #0x2\n"
     "mov z8.b, #0x80\n"
     "mov x19, %x[inptrs]\n"
-    "lsr x24, %x[n_valid_cells], #0x2\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
     "add x19, x19, #0x20\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "smax z8.b, p4/M, z8.b, z19.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "movprfx z19, z3\n smax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n smax z23.b, p4/M, z23.b, z0.b\n"
-    "smax z19.b, p4/M, z19.b, z23.b\n"
-    "smax z8.b, p4/M, z8.b, z19.b\n"
+    "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
+    "smax z19.b, p0/M, z19.b, z23.b\n"
+    "smax z8.b, p0/M, z8.b, z19.b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "smax z8.b, p4/M, z8.b, z3.b\n"
+    "smax z8.b, p0/M, z8.b, z4.b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "mov z4.s, #0x7f\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    "ld1rw { z3.s }, p4/Z, [x19]\n"
     ".inst 0x4508a111  // sshllb z17.h, z8.b, #0x0\n"
+    ".inst 0x4508a517  // sshllt z23.h, z8.b, #0x0\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    ".inst 0x4508a510  // sshllt z16.h, z8.b, #0x0\n"
-    "ld1rw { z2.s }, p4/Z, [x19]\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    ".inst 0x4510a220  // sshllb z0.s, z17.h, #0x0\n"
-    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4510a221  // sshllb z1.s, z17.h, #0x0\n"
     ".inst 0x4510a631  // sshllt z17.s, z17.h, #0x0\n"
-    ".inst 0x4510a21f  // sshllb z31.s, z16.h, #0x0\n"
-    ".inst 0x4510a610  // sshllt z16.s, z16.h, #0x0\n"
-    ".inst 0x44829040  // srshl z0.s, p4/M, z0.s, z2.s\n"
-    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
-    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"
-    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
-    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    ".inst 0x4510a2e0  // sshllb z0.s, z23.h, #0x0\n"
+    ".inst 0x4510a6ff  // sshllt z31.s, z23.h, #0x0\n"
+    ".inst 0x44828081  // srshl z1.s, p0/M, z1.s, z4.s\n"
+    ".inst 0x44828091  // srshl z17.s, p0/M, z17.s, z4.s\n"
+    ".inst 0x44828080  // srshl z0.s, p0/M, z0.s, z4.s\n"
+    ".inst 0x4482809f  // srshl z31.s, p0/M, z31.s, z4.s\n"
+    ".inst 0x04a37421  // sqrdmulh z1.s, z1.s, z3.s\n"
     ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+    "ld1rw { z2.s }, p0/Z, [x19]\n"
+    ".inst 0x04a37400  // sqrdmulh z0.s, z0.s, z3.s\n"
     ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
-    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
-    ".inst 0x44829020  // srshl z0.s, p4/M, z0.s, z1.s\n"
-    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
-    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
-    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
-    "not z19.s, p4/M, z4.s\n"
-    "smax z0.s, p4/M, z0.s, z19.s\n"
-    "smax z17.s, p4/M, z17.s, z19.s\n"
-    "smax z31.s, p4/M, z31.s, z19.s\n"
-    "smax z16.s, p4/M, z16.s, z19.s\n"
-    "smin z0.s, p4/M, z0.s, z4.s\n"
-    "smin z17.s, p4/M, z17.s, z4.s\n"
-    "smin z31.s, p4/M, z31.s, z4.s\n"
-    "smin z16.s, p4/M, z16.s, z4.s\n"
-    "trn1 z17.h, z0.h, z17.h\n"
-    "trn1 z16.h, z31.h, z16.h\n"
+    "mov z18.s, #0x7f\n"
+    ".inst 0x44828041  // srshl z1.s, p0/M, z1.s, z2.s\n"
+    ".inst 0x44828051  // srshl z17.s, p0/M, z17.s, z2.s\n"
+    ".inst 0x44828040  // srshl z0.s, p0/M, z0.s, z2.s\n"
+    ".inst 0x4482805f  // srshl z31.s, p0/M, z31.s, z2.s\n"
+    "not z16.s, p0/M, z18.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z17.s, p0/M, z17.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
+    "smin z17.s, p0/M, z17.s, z18.s\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
+    "trn1 z17.h, z1.h, z17.h\n"
+    "smin z31.s, p0/M, z31.s, z18.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
     "incb x28\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
     : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -383,4 +383,4 @@
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp
index a2bfec7..714530b 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,29 +26,21 @@
 
 #pragma once
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
 
 void sve_u8_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
 
-struct sve_u8_nhwc_avg_generic_depthfirst
+struct sve_u8_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = sve_u8_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t>;
   sve_u8_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_u8_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
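
Note: the generic.cpp change that follows guards the Q31 encoding of 1/n against the multiplier rounding up to exactly 2^31, which does not fit in int32_t. A hedged sketch of the whole rescale computation is given below; the normalisation loop is reconstructed from context (only its last line appears in the hunk) and the function name is illustrative.

#include <cmath>
#include <cstdint>

// Sketch only: encode 1/window_cells as a Q31 multiplier plus a (negative)
// shift, computing the multiplier in int64_t and halving it if it rounds up
// to 2^31, which is the overflow the patch below avoids.
static void compute_rescale(uint64_t window_cells, int32_t &rescale_value, int32_t &shift_value)
{
  float f_rescale_value = 1.0f / static_cast<float>(window_cells);

  shift_value = 0;
  while (f_rescale_value < 0.5f)  // normalise the multiplier into [0.5, 1)
  {
    shift_value--;
    f_rescale_value *= 2.0f;
  }

  int64_t long_rescale_value = std::round(f_rescale_value * static_cast<float>(1ll << 31));
  if (long_rescale_value == (1ll << 31))  // 2^31 would overflow int32_t
  {
    shift_value++;
    long_rescale_value >>= 1;
  }
  rescale_value = static_cast<int32_t>(long_rescale_value);
}
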
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
index 4c72461..f853e9d 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,11 +23,12 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
@@ -84,30 +85,31 @@
       f_rescale_value *= 2.0f;
     }
 
-    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
-    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x26, #0x0\n"
     "cntb x25\n"
     "cntb x24, ALL, MUL #2\n"
     "cntb x23, ALL, MUL #3\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
-    "whilelt p2.b, x25, %x[n_channels]\n"
-    "whilelt p1.b, x24, %x[n_channels]\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z15.s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z14.s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z13.s, #0x0\n"
     "mov z12.s, #0x0\n"
     "mov z11.s, #0x0\n"
@@ -124,41 +126,41 @@
     "mov z0.s, #0x0\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
     ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
     ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
     ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
     ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
     ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
     ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
     ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
     ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
     ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
     ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
@@ -202,156 +204,156 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508abb5  // ushllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508afb4  // ushllt z20.h, z29.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508ab73  // ushllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508af72  // ushllt z18.h, z27.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
-    ".inst 0x4508abb0  // ushllb z16.h, z29.b, #0x0\n"
-    ".inst 0x4590496b  // uaddwb z11.s, z11.s, z16.h\n"
-    ".inst 0x45904d4a  // uaddwt z10.s, z10.s, z16.h\n"
-    ".inst 0x4508afb0  // ushllt z16.h, z29.b, #0x0\n"
-    ".inst 0x45904929  // uaddwb z9.s, z9.s, z16.h\n"
-    ".inst 0x45904d08  // uaddwt z8.s, z8.s, z16.h\n"
-    ".inst 0x4508ab70  // ushllb z16.h, z27.b, #0x0\n"
-    ".inst 0x459048e7  // uaddwb z7.s, z7.s, z16.h\n"
-    ".inst 0x45904cc6  // uaddwt z6.s, z6.s, z16.h\n"
-    ".inst 0x4508af70  // ushllt z16.h, z27.b, #0x0\n"
-    ".inst 0x459048a5  // uaddwb z5.s, z5.s, z16.h\n"
-    ".inst 0x45904c84  // uaddwt z4.s, z4.s, z16.h\n"
-    ".inst 0x4508ab30  // ushllb z16.h, z25.b, #0x0\n"
-    ".inst 0x45904863  // uaddwb z3.s, z3.s, z16.h\n"
-    ".inst 0x45904c42  // uaddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508ab31  // ushllb z17.h, z25.b, #0x0\n"
     ".inst 0x4508af30  // ushllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
     ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
     ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "mov z20.s, #0x0\n"
-    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
-    "mov z19.s, #0xff\n"
-    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
     ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
     ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
     ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
     ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
     ".inst 0x04b1756b  // sqdmulh z11.s, z11.s, z17.s\n"
     ".inst 0x04b1754a  // sqdmulh z10.s, z10.s, z17.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
     ".inst 0x04b17529  // sqdmulh z9.s, z9.s, z17.s\n"
     ".inst 0x04b17508  // sqdmulh z8.s, z8.s, z17.s\n"
+    ".inst 0x4482820b  // srshl z11.s, p0/M, z11.s, z16.s\n"
+    ".inst 0x4482820a  // srshl z10.s, p0/M, z10.s, z16.s\n"
     ".inst 0x04b174e7  // sqdmulh z7.s, z7.s, z17.s\n"
     ".inst 0x04b174c6  // sqdmulh z6.s, z6.s, z17.s\n"
+    ".inst 0x44828209  // srshl z9.s, p0/M, z9.s, z16.s\n"
+    ".inst 0x44828208  // srshl z8.s, p0/M, z8.s, z16.s\n"
     ".inst 0x04b174a5  // sqdmulh z5.s, z5.s, z17.s\n"
     ".inst 0x04b17484  // sqdmulh z4.s, z4.s, z17.s\n"
+    ".inst 0x44828207  // srshl z7.s, p0/M, z7.s, z16.s\n"
+    ".inst 0x44828206  // srshl z6.s, p0/M, z6.s, z16.s\n"
     ".inst 0x04b17463  // sqdmulh z3.s, z3.s, z17.s\n"
     ".inst 0x04b17442  // sqdmulh z2.s, z2.s, z17.s\n"
+    ".inst 0x44828205  // srshl z5.s, p0/M, z5.s, z16.s\n"
+    ".inst 0x44828204  // srshl z4.s, p0/M, z4.s, z16.s\n"
     ".inst 0x04b17421  // sqdmulh z1.s, z1.s, z17.s\n"
     ".inst 0x04b17400  // sqdmulh z0.s, z0.s, z17.s\n"
-    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
-    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
-    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
-    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
-    ".inst 0x4482920b  // srshl z11.s, p4/M, z11.s, z16.s\n"
-    ".inst 0x4482920a  // srshl z10.s, p4/M, z10.s, z16.s\n"
-    ".inst 0x44829209  // srshl z9.s, p4/M, z9.s, z16.s\n"
-    ".inst 0x44829208  // srshl z8.s, p4/M, z8.s, z16.s\n"
-    ".inst 0x44829207  // srshl z7.s, p4/M, z7.s, z16.s\n"
-    ".inst 0x44829206  // srshl z6.s, p4/M, z6.s, z16.s\n"
-    ".inst 0x44829205  // srshl z5.s, p4/M, z5.s, z16.s\n"
-    ".inst 0x44829204  // srshl z4.s, p4/M, z4.s, z16.s\n"
-    ".inst 0x44829203  // srshl z3.s, p4/M, z3.s, z16.s\n"
-    ".inst 0x44829202  // srshl z2.s, p4/M, z2.s, z16.s\n"
-    ".inst 0x44829201  // srshl z1.s, p4/M, z1.s, z16.s\n"
-    ".inst 0x44829200  // srshl z0.s, p4/M, z0.s, z16.s\n"
-    "smax z15.s, p4/M, z15.s, z20.s\n"
-    "smax z14.s, p4/M, z14.s, z20.s\n"
-    "smax z13.s, p4/M, z13.s, z20.s\n"
-    "smax z12.s, p4/M, z12.s, z20.s\n"
-    "smin z15.s, p4/M, z15.s, z19.s\n"
-    "smin z14.s, p4/M, z14.s, z19.s\n"
-    "smin z13.s, p4/M, z13.s, z19.s\n"
-    "smin z12.s, p4/M, z12.s, z19.s\n"
-    "smax z11.s, p4/M, z11.s, z20.s\n"
+    ".inst 0x44828203  // srshl z3.s, p0/M, z3.s, z16.s\n"
+    ".inst 0x44828202  // srshl z2.s, p0/M, z2.s, z16.s\n"
+    ".inst 0x44828201  // srshl z1.s, p0/M, z1.s, z16.s\n"
+    ".inst 0x44828200  // srshl z0.s, p0/M, z0.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z18.s, #0xff\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
-    "smax z10.s, p4/M, z10.s, z20.s\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
-    "smin z11.s, p4/M, z11.s, z19.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
-    "smin z10.s, p4/M, z10.s, z19.s\n"
-    "incb x26, ALL, MUL #4\n"
-    "smax z9.s, p4/M, z9.s, z20.s\n"
-    "smax z8.s, p4/M, z8.s, z20.s\n"
-    "smax z7.s, p4/M, z7.s, z20.s\n"
-    "smax z6.s, p4/M, z6.s, z20.s\n"
-    "trn1 z18.h, z11.h, z10.h\n"
-    "smin z9.s, p4/M, z9.s, z19.s\n"
-    "smin z8.s, p4/M, z8.s, z19.s\n"
-    "smin z7.s, p4/M, z7.s, z19.s\n"
-    "smin z6.s, p4/M, z6.s, z19.s\n"
-    "smax z5.s, p4/M, z5.s, z20.s\n"
+    "smin z11.s, p0/M, z11.s, z18.s\n"
+    "smin z10.s, p0/M, z10.s, z18.s\n"
+    "trn1 z17.h, z11.h, z10.h\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "smin z9.s, p0/M, z9.s, z18.s\n"
+    "smin z8.s, p0/M, z8.s, z18.s\n"
     "trn1 z16.h, z9.h, z8.h\n"
-    "smax z4.s, p4/M, z4.s, z20.s\n"
-    "trn1 z17.h, z7.h, z6.h\n"
-    "trn1 z16.b, z18.b, z16.b\n"
-    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
-    "smin z5.s, p4/M, z5.s, z19.s\n"
-    "incb x25, ALL, MUL #4\n"
-    "smin z4.s, p4/M, z4.s, z19.s\n"
-    "smax z3.s, p4/M, z3.s, z20.s\n"
-    "smax z2.s, p4/M, z2.s, z20.s\n"
-    "smax z1.s, p4/M, z1.s, z20.s\n"
-    "smax z0.s, p4/M, z0.s, z20.s\n"
-    "trn1 z16.h, z5.h, z4.h\n"
-    "smin z3.s, p4/M, z3.s, z19.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
-    "smin z2.s, p4/M, z2.s, z19.s\n"
-    "incb x24, ALL, MUL #4\n"
-    "smin z1.s, p4/M, z1.s, z19.s\n"
-    "smin z0.s, p4/M, z0.s, z19.s\n"
+    "smin z7.s, p0/M, z7.s, z18.s\n"
+    "smin z6.s, p0/M, z6.s, z18.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "st1b { z16.b }, p3, [%x[outptr], x25]\n"
+    "smin z5.s, p0/M, z5.s, z18.s\n"
+    "smin z4.s, p0/M, z4.s, z18.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "smin z3.s, p0/M, z3.s, z18.s\n"
+    "smin z2.s, p0/M, z2.s, z18.s\n"
     "trn1 z17.h, z3.h, z2.h\n"
+    "st1b { z16.b }, p2, [%x[outptr], x24]\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
     "trn1 z16.h, z1.h, z0.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
     "incb x23, ALL, MUL #4\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "incb x26, ALL, MUL #4\n"
+    "incb x25, ALL, MUL #4\n"
+    "incb x24, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z15.s, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z14.s, #0x0\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z15.s, #0x0\n"
+    "mov z14.s, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "mov z13.s, #0x0\n"
     "mov z12.s, #0x0\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
     ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
     ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
@@ -365,45 +367,44 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
-    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
-    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
-    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "mov z20.s, #0x0\n"
-    "ld1rw { z17.s }, p4/Z, [%x[rescale_ptr]]\n"
-    "mov z19.s, #0xff\n"
-    "ld1rw { z16.s }, p4/Z, [%x[shift_ptr]]\n"
+    "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
+    "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
     ".inst 0x04b175ef  // sqdmulh z15.s, z15.s, z17.s\n"
     ".inst 0x04b175ce  // sqdmulh z14.s, z14.s, z17.s\n"
     ".inst 0x04b175ad  // sqdmulh z13.s, z13.s, z17.s\n"
     ".inst 0x04b1758c  // sqdmulh z12.s, z12.s, z17.s\n"
-    ".inst 0x4482920f  // srshl z15.s, p4/M, z15.s, z16.s\n"
-    ".inst 0x4482920e  // srshl z14.s, p4/M, z14.s, z16.s\n"
-    ".inst 0x4482920d  // srshl z13.s, p4/M, z13.s, z16.s\n"
-    ".inst 0x4482920c  // srshl z12.s, p4/M, z12.s, z16.s\n"
-    "smax z15.s, p4/M, z15.s, z20.s\n"
-    "smax z14.s, p4/M, z14.s, z20.s\n"
-    "smax z13.s, p4/M, z13.s, z20.s\n"
-    "smax z12.s, p4/M, z12.s, z20.s\n"
-    "smin z15.s, p4/M, z15.s, z19.s\n"
-    "smin z14.s, p4/M, z14.s, z19.s\n"
-    "smin z13.s, p4/M, z13.s, z19.s\n"
-    "smin z12.s, p4/M, z12.s, z19.s\n"
+    ".inst 0x4482820f  // srshl z15.s, p0/M, z15.s, z16.s\n"
+    ".inst 0x4482820e  // srshl z14.s, p0/M, z14.s, z16.s\n"
+    ".inst 0x4482820d  // srshl z13.s, p0/M, z13.s, z16.s\n"
+    ".inst 0x4482820c  // srshl z12.s, p0/M, z12.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "mov z18.s, #0xff\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
     "incb x26\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
     : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -413,4 +414,4 @@
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
index 11f485c..eae83b9 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,27 +31,18 @@
 
 void sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
 
-struct sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst
+struct sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst : public DepthfirstStrategy<uint8_t, uint8_t>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
+  using Parent = DepthfirstStrategy<uint8_t, uint8_t>;
 
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *const, uint8_t *const *const, bool, unsigned int, unsigned int, unsigned int, unsigned int);
+  const static auto pooling_type = PoolingType::MAX;
+  const static auto pool_rows = 2u, pool_cols = 2u;
+  const static auto stride_rows = 1u, stride_cols = 1u;
 
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
+  sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *)
+  : Parent(pool_rows, pool_cols, stride_rows, stride_cols, 2, 2) {}
 
-  constexpr static unsigned int pool_rows(void) { return 2; }
-  constexpr static unsigned int pool_cols(void) { return 2; }
-
-  constexpr static unsigned int stride_rows(void) { return 1; }
-  constexpr static unsigned int stride_cols(void) { return 1; }
-
-  constexpr static unsigned int out_rows(void) { return 2; }
-  constexpr static unsigned int out_cols(void) { return 2; }
-
-  kern_type kernel = sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl;
-
-  sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const { return sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 92779d0..2a08610 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -64,76 +64,76 @@
 
   __asm__ __volatile__(
     "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
-    "ptrue p2.b\n"
     "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
     "mov x13, #0x0\n"
+    "whilelt p2.b, x13, x14\n"
     "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
-    "mov x12, #0x0\n"
-    "ldp x11, x10, [x20, #0x0]\n"
-    "whilelt p1.b, x13, x14\n"
+    "ldp x12, x11, [x20, #0x0]\n"
+    "ptrue p1.b\n"
+    "mov x10, #0x0\n"
     "ldp x9, x28, [x20, #0x10]\n"
     "ldp x27, x26, [x19, #0x0]\n"
     "ldp x25, x24, [x19, #0x10]\n"
     "ldp x23, x22, [x19, #0x20]\n"
     "ldp x21, x20, [x19, #0x30]\n"
     "ldr x19, [x19, #0x40]\n"
-    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
-    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
-    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
-    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
-    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
-    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
-    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
-    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
-    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+    "ld1b { z30.b }, p2/Z, [x23, x13]\n"
+    "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+    "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+    "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+    "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+    "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+    "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+    "ld1b { z23.b }, p2/Z, [x19, x13]\n"
     "incw x13\n"
-    "whilelt p1.b, x13, x14\n"
+    "whilelt p2.b, x13, x14\n"
     "b.none 2f\n"
     "1:"  // Vector: Loop
-    "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z30.b\n"
-    "ld1b { z31.b }, p1/Z, [x26, x13]\n"
-    "whilelt p0.b, x12, x14\n"
-    "movprfx z21, z30\n umax z21.b, p2/M, z21.b, z29.b\n"
-    "ld1b { z30.b }, p1/Z, [x23, x13]\n"
-    "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z27.b\n"
-    "ld1b { z29.b }, p1/Z, [x20, x13]\n"
-    "movprfx z17, z26\n umax z17.b, p2/M, z17.b, z25.b\n"
-    "ld1b { z27.b }, p1/Z, [x27, x13]\n"
-    "movprfx z16, z24\n umax z16.b, p2/M, z16.b, z28.b\n"
-    "ld1b { z28.b }, p1/Z, [x24, x13]\n"
-    "movprfx z20, z26\n umax z20.b, p2/M, z20.b, z23.b\n"
-    "ld1b { z26.b }, p1/Z, [x22, x13]\n"
-    "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
-    "ld1b { z25.b }, p1/Z, [x25, x13]\n"
-    "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z17.b\n"
-    "ld1b { z24.b }, p1/Z, [x21, x13]\n"
-    "movprfx z17, z21\n umax z17.b, p2/M, z17.b, z16.b\n"
-    "ld1b { z23.b }, p1/Z, [x19, x13]\n"
+    "movprfx z22, z31\n umax z22.b, p1/M, z22.b, z30.b\n"
+    "movprfx z21, z30\n umax z21.b, p1/M, z21.b, z29.b\n"
+    "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+    "ld1b { z30.b }, p2/Z, [x23, x13]\n"
+    "movprfx z20, z28\n umax z20.b, p1/M, z20.b, z27.b\n"
+    "movprfx z17, z26\n umax z17.b, p1/M, z17.b, z25.b\n"
+    "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+    "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+    "movprfx z19, z24\n umax z19.b, p1/M, z19.b, z28.b\n"
+    "movprfx z18, z26\n umax z18.b, p1/M, z18.b, z23.b\n"
+    "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+    "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+    "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+    "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+    "whilelt p0.b, x10, x14\n"
+    "movprfx z16, z22\n umax z16.b, p1/M, z16.b, z20.b\n"
+    "ld1b { z23.b }, p2/Z, [x19, x13]\n"
     "incw x13\n"
-    "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
-    "st1b { z19.b }, p0, [x11, x12]\n"
-    "whilelt p1.b, x13, x14\n"
-    "st1b { z18.b }, p0, [x10, x12]\n"
-    "st1b { z17.b }, p0, [x9, x12]\n"
-    "st1b { z16.b }, p0, [x28, x12]\n"
-    "incw x12\n"
+    "whilelt p2.b, x13, x14\n"
+    "st1b { z16.b }, p0, [x12, x10]\n"
+    "movprfx z16, z17\n umax z16.b, p1/M, z16.b, z22.b\n"
+    "movprfx z17, z21\n umax z17.b, p1/M, z17.b, z19.b\n"
+    "st1b { z16.b }, p0, [x11, x10]\n"
+    "movprfx z16, z21\n umax z16.b, p1/M, z16.b, z18.b\n"
+    "st1b { z17.b }, p0, [x9, x10]\n"
+    "st1b { z16.b }, p0, [x28, x10]\n"
+    "incw x10\n"
     "b.any 1b\n"
     "2:"  // Vector: Tail
-    "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z30.b\n"
-    "whilelt p0.b, x12, x14\n"
-    "movprfx z21, z30\n umax z21.b, p2/M, z21.b, z29.b\n"
-    "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z27.b\n"
-    "movprfx z17, z26\n umax z17.b, p2/M, z17.b, z25.b\n"
-    "movprfx z16, z24\n umax z16.b, p2/M, z16.b, z28.b\n"
-    "movprfx z20, z26\n umax z20.b, p2/M, z20.b, z23.b\n"
-    "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
-    "st1b { z19.b }, p0, [x11, x12]\n"
-    "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z17.b\n"
-    "movprfx z17, z21\n umax z17.b, p2/M, z17.b, z16.b\n"
-    "st1b { z18.b }, p0, [x10, x12]\n"
-    "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
-    "st1b { z17.b }, p0, [x9, x12]\n"
-    "st1b { z16.b }, p0, [x28, x12]\n"
+    "movprfx z22, z31\n umax z22.b, p1/M, z22.b, z30.b\n"
+    "movprfx z21, z30\n umax z21.b, p1/M, z21.b, z29.b\n"
+    "movprfx z20, z28\n umax z20.b, p1/M, z20.b, z27.b\n"
+    "movprfx z17, z26\n umax z17.b, p1/M, z17.b, z25.b\n"
+    "movprfx z19, z24\n umax z19.b, p1/M, z19.b, z28.b\n"
+    "movprfx z18, z26\n umax z18.b, p1/M, z18.b, z23.b\n"
+    "whilelt p0.b, x10, x14\n"
+    "movprfx z16, z22\n umax z16.b, p1/M, z16.b, z20.b\n"
+    "st1b { z16.b }, p0, [x12, x10]\n"
+    "movprfx z16, z17\n umax z16.b, p1/M, z16.b, z22.b\n"
+    "movprfx z17, z21\n umax z17.b, p1/M, z17.b, z19.b\n"
+    "st1b { z16.b }, p0, [x11, x10]\n"
+    "movprfx z16, z21\n umax z16.b, p1/M, z16.b, z18.b\n"
+    "st1b { z17.b }, p0, [x9, x10]\n"
+    "st1b { z16.b }, p0, [x28, x10]\n"
     :
     : [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
     : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp
index 92be064..9f3c3a4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,11 @@
 
 void sve_u8_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
 
-struct sve_u8_nhwc_max_generic_depthfirst
+struct sve_u8_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = sve_u8_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t>;
   sve_u8_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_u8_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
index de81d1c..0db1ad1 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,7 @@
  */
 
 #include <cstdint>
+#include <cstddef>
 
 #if defined(ARM_COMPUTE_ENABLE_SVE)
 
@@ -39,181 +40,180 @@
 )
 {
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x28, #0x0\n"
     "cntb x27\n"
     "cntb x26, ALL, MUL #2\n"
     "cntb x25, ALL, MUL #3\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
-    "whilelt p2.b, x27, %x[n_channels]\n"
-    "whilelt p1.b, x26, %x[n_channels]\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
+    "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.b, #0x0\n"
     "mov z7.b, #0x0\n"
     "mov x19, %x[inptrs]\n"
     "mov z6.b, #0x0\n"
-    "lsr x24, %x[n_valid_cells], #0x2\n"
     "mov z5.b, #0x0\n"
-    "mov z4.b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
+    "umax z22.b, p0/M, z22.b, z30.b\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
+    "umax z21.b, p0/M, z21.b, z27.b\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "subs x24, x24, #0x1\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "umax z7.b, p0/M, z7.b, z18.b\n"
+    "umax z6.b, p0/M, z6.b, z17.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "umax z5.b, p0/M, z5.b, z16.b\n"
     "add x19, x19, #0x20\n"
-    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "umax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "umax z21.b, p4/M, z21.b, z26.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "umax z16.b, p4/M, z16.b, z25.b\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "umax z20.b, p4/M, z20.b, z24.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "umax z18.b, p4/M, z18.b, z22.b\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "umax z17.b, p4/M, z17.b, z21.b\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "umax z16.b, p4/M, z16.b, z20.b\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "umax z7.b, p4/M, z7.b, z19.b\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "umax z6.b, p4/M, z6.b, z18.b\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "umax z5.b, p4/M, z5.b, z17.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "umax z4.b, p4/M, z4.b, z16.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
-    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
-    "umax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
-    "umax z21.b, p4/M, z21.b, z26.b\n"
-    "umax z16.b, p4/M, z16.b, z25.b\n"
-    "umax z20.b, p4/M, z20.b, z24.b\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "umax z18.b, p4/M, z18.b, z22.b\n"
-    "umax z17.b, p4/M, z17.b, z21.b\n"
-    "umax z16.b, p4/M, z16.b, z20.b\n"
-    "umax z7.b, p4/M, z7.b, z19.b\n"
-    "umax z6.b, p4/M, z6.b, z18.b\n"
-    "umax z5.b, p4/M, z5.b, z17.b\n"
-    "umax z4.b, p4/M, z4.b, z16.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
+    "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
+    "umax z22.b, p0/M, z22.b, z30.b\n"
+    "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
+    "umax z21.b, p0/M, z21.b, z27.b\n"
+    "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
+    "umax z7.b, p0/M, z7.b, z18.b\n"
+    "umax z6.b, p0/M, z6.b, z17.b\n"
+    "umax z5.b, p0/M, z5.b, z16.b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "umax z7.b, p4/M, z7.b, z3.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "umax z6.b, p4/M, z6.b, z31.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "umax z5.b, p4/M, z5.b, z28.b\n"
-    "umax z4.b, p4/M, z4.b, z16.b\n"
+    "umax z8.b, p0/M, z8.b, z4.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "umax z7.b, p0/M, z7.b, z0.b\n"
+    "umax z6.b, p0/M, z6.b, z29.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "umax z5.b, p0/M, z5.b, z26.b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "st1b { z8.b }, p4, [%x[outptr], x28]\n"
     "incb x28, ALL, MUL #4\n"
-    "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+    "st1b { z7.b }, p3, [%x[outptr], x27]\n"
     "incb x27, ALL, MUL #4\n"
-    "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+    "st1b { z6.b }, p2, [%x[outptr], x26]\n"
     "incb x26, ALL, MUL #4\n"
-    "st1b { z4.b }, p0, [%x[outptr], x25]\n"
+    "st1b { z5.b }, p1, [%x[outptr], x25]\n"
     "incb x25, ALL, MUL #4\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z7.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
     "add x19, x19, #0x20\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "umax z7.b, p4/M, z7.b, z19.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "umax z7.b, p4/M, z7.b, z19.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "umax z7.b, p4/M, z7.b, z3.b\n"
+    "umax z8.b, p0/M, z8.b, z4.b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+    "st1b { z8.b }, p4, [%x[outptr], x28]\n"
     "incb x28\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp
index 91a9925..f9d25a1 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,29 +26,21 @@
 
 #pragma once
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
 
 void sve_u8q_nhwc_avg_generic_depthfirst_impl(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
 
-struct sve_u8q_nhwc_avg_generic_depthfirst
+struct sve_u8q_nhwc_avg_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t window_cells, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::AVERAGE; }
-
-
-  kern_type kernel = sve_u8q_nhwc_avg_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>;
   sve_u8q_nhwc_avg_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_u8q_nhwc_avg_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index abf911c..903ada3 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,11 +24,12 @@
 
 #include "pooling.hpp"
 #include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <cmath>
 
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
@@ -86,12 +87,13 @@
       f_rescale_value *= 2.0f;
     }
 
-    rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
-    if (static_cast<int64_t>(rescale_value) == (1ll << 31))
+    int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+    if (long_rescale_value == (1ll << 31))
     {
       shift_value++;
-      rescale_value >>= 1;
+      long_rescale_value >>= 1;
     }
+    rescale_value = static_cast<int32_t>(long_rescale_value);
   }
 
 
@@ -117,24 +119,24 @@
   );
 
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x26, #0x0\n"
     "cntb x25\n"
     "cntb x24, ALL, MUL #2\n"
     "cntb x23, ALL, MUL #3\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
-    "whilelt p2.b, x25, %x[n_channels]\n"
-    "whilelt p1.b, x24, %x[n_channels]\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
+    "whilelt p3.b, x25, %x[n_channels]\n"
+    "whilelt p2.b, x24, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "ld1rw { z15.s }, p4/Z, [%x[accumulator_init]]\n"
-    "mov z14.d, z15.d\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z13.d, z15.d\n"
+    "ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z14.d, z15.d\n"
+    "mov z13.d, z15.d\n"
     "mov z12.d, z15.d\n"
     "mov z11.d, z15.d\n"
+    "mov x19, %x[inptrs]\n"
     "mov z10.d, z15.d\n"
     "mov z9.d, z15.d\n"
     "mov z8.d, z15.d\n"
@@ -148,41 +150,41 @@
     "mov z0.d, z15.d\n"
     "cbz x22, 4f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 2 inputs loop
     ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x455c0bb5  // uaddlb z21.h, z29.b, z28.b\n"
     ".inst 0x455c0fb4  // uaddlt z20.h, z29.b, z28.b\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x455a0b73  // uaddlb z19.h, z27.b, z26.b\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
     ".inst 0x455a0f72  // uaddlt z18.h, z27.b, z26.b\n"
-    "ld1b { z28.b }, p2/Z, [x20, x25]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
     ".inst 0x45580b31  // uaddlb z17.h, z25.b, z24.b\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
     ".inst 0x45580f30  // uaddlt z16.h, z25.b, z24.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x24]\n"
+    "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
     ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
     ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z24.b }, p0/Z, [x20, x23]\n"
+    "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
     ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
     ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z24.b }, p1/Z, [x20, x23]\n"
     ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
     ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
     ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
@@ -226,191 +228,191 @@
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
+    "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+    "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+    ".inst 0x4508abb5  // ushllb z21.h, z29.b, #0x0\n"
+    ".inst 0x4508afb4  // ushllt z20.h, z29.b, #0x0\n"
+    "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+    ".inst 0x4508ab73  // ushllb z19.h, z27.b, #0x0\n"
+    ".inst 0x4508af72  // ushllt z18.h, z27.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
-    "ld1b { z29.b }, p2/Z, [x21, x25]\n"
-    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
-    "ld1b { z27.b }, p1/Z, [x21, x24]\n"
-    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
-    "ld1b { z25.b }, p0/Z, [x21, x23]\n"
-    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
-    ".inst 0x4508abb0  // ushllb z16.h, z29.b, #0x0\n"
-    ".inst 0x4590496b  // uaddwb z11.s, z11.s, z16.h\n"
-    ".inst 0x45904d4a  // uaddwt z10.s, z10.s, z16.h\n"
-    ".inst 0x4508afb0  // ushllt z16.h, z29.b, #0x0\n"
-    ".inst 0x45904929  // uaddwb z9.s, z9.s, z16.h\n"
-    ".inst 0x45904d08  // uaddwt z8.s, z8.s, z16.h\n"
-    ".inst 0x4508ab70  // ushllb z16.h, z27.b, #0x0\n"
-    ".inst 0x459048e7  // uaddwb z7.s, z7.s, z16.h\n"
-    ".inst 0x45904cc6  // uaddwt z6.s, z6.s, z16.h\n"
-    ".inst 0x4508af70  // ushllt z16.h, z27.b, #0x0\n"
-    ".inst 0x459048a5  // uaddwb z5.s, z5.s, z16.h\n"
-    ".inst 0x45904c84  // uaddwt z4.s, z4.s, z16.h\n"
-    ".inst 0x4508ab30  // ushllb z16.h, z25.b, #0x0\n"
-    ".inst 0x45904863  // uaddwb z3.s, z3.s, z16.h\n"
-    ".inst 0x45904c42  // uaddwt z2.s, z2.s, z16.h\n"
+    ".inst 0x4508ab31  // ushllb z17.h, z25.b, #0x0\n"
     ".inst 0x4508af30  // ushllt z16.h, z25.b, #0x0\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    ".inst 0x4595496b  // uaddwb z11.s, z11.s, z21.h\n"
+    ".inst 0x45954d4a  // uaddwt z10.s, z10.s, z21.h\n"
+    ".inst 0x45944929  // uaddwb z9.s, z9.s, z20.h\n"
+    ".inst 0x45944d08  // uaddwt z8.s, z8.s, z20.h\n"
+    ".inst 0x459348e7  // uaddwb z7.s, z7.s, z19.h\n"
+    ".inst 0x45934cc6  // uaddwt z6.s, z6.s, z19.h\n"
+    ".inst 0x459248a5  // uaddwb z5.s, z5.s, z18.h\n"
+    ".inst 0x45924c84  // uaddwt z4.s, z4.s, z18.h\n"
+    ".inst 0x45914863  // uaddwb z3.s, z3.s, z17.h\n"
+    ".inst 0x45914c42  // uaddwt z2.s, z2.s, z17.h\n"
     ".inst 0x45904821  // uaddwb z1.s, z1.s, z16.h\n"
     ".inst 0x45904c00  // uaddwt z0.s, z0.s, z16.h\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "mov z21.s, #0x0\n"
-    "ld1rw { z20.s }, p4/Z, [%x[combined_rescale_value]]\n"
+    "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
+    "ld1rw { z16.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x4482824f  // srshl z15.s, p0/M, z15.s, z18.s\n"
+    ".inst 0x4482824e  // srshl z14.s, p0/M, z14.s, z18.s\n"
+    ".inst 0x4482824d  // srshl z13.s, p0/M, z13.s, z18.s\n"
+    ".inst 0x4482824c  // srshl z12.s, p0/M, z12.s, z18.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x04b075ef  // sqrdmulh z15.s, z15.s, z16.s\n"
+    ".inst 0x4482824b  // srshl z11.s, p0/M, z11.s, z18.s\n"
+    ".inst 0x4482824a  // srshl z10.s, p0/M, z10.s, z18.s\n"
+    ".inst 0x04b075ce  // sqrdmulh z14.s, z14.s, z16.s\n"
+    ".inst 0x04b075ad  // sqrdmulh z13.s, z13.s, z16.s\n"
+    ".inst 0x44828249  // srshl z9.s, p0/M, z9.s, z18.s\n"
+    ".inst 0x44828248  // srshl z8.s, p0/M, z8.s, z18.s\n"
+    ".inst 0x04b0758c  // sqrdmulh z12.s, z12.s, z16.s\n"
+    ".inst 0x04b0756b  // sqrdmulh z11.s, z11.s, z16.s\n"
+    ".inst 0x44828247  // srshl z7.s, p0/M, z7.s, z18.s\n"
+    ".inst 0x44828246  // srshl z6.s, p0/M, z6.s, z18.s\n"
+    ".inst 0x04b0754a  // sqrdmulh z10.s, z10.s, z16.s\n"
+    ".inst 0x04b07529  // sqrdmulh z9.s, z9.s, z16.s\n"
+    ".inst 0x44828245  // srshl z5.s, p0/M, z5.s, z18.s\n"
+    ".inst 0x44828244  // srshl z4.s, p0/M, z4.s, z18.s\n"
+    ".inst 0x04b07508  // sqrdmulh z8.s, z8.s, z16.s\n"
+    ".inst 0x04b074e7  // sqrdmulh z7.s, z7.s, z16.s\n"
+    ".inst 0x44828243  // srshl z3.s, p0/M, z3.s, z18.s\n"
+    ".inst 0x44828242  // srshl z2.s, p0/M, z2.s, z18.s\n"
+    ".inst 0x04b074c6  // sqrdmulh z6.s, z6.s, z16.s\n"
+    ".inst 0x04b074a5  // sqrdmulh z5.s, z5.s, z16.s\n"
+    ".inst 0x44828241  // srshl z1.s, p0/M, z1.s, z18.s\n"
+    ".inst 0x44828240  // srshl z0.s, p0/M, z0.s, z18.s\n"
+    ".inst 0x04b07484  // sqrdmulh z4.s, z4.s, z16.s\n"
+    ".inst 0x04b07463  // sqrdmulh z3.s, z3.s, z16.s\n"
+    ".inst 0x04b07442  // sqrdmulh z2.s, z2.s, z16.s\n"
+    ".inst 0x04b07421  // sqrdmulh z1.s, z1.s, z16.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "mov z19.s, #0xff\n"
-    "ld1rw { z18.s }, p4/Z, [%x[left_shift]]\n"
-    "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
-    ".inst 0x4482924f  // srshl z15.s, p4/M, z15.s, z18.s\n"
-    "ld1rw { z16.s }, p4/Z, [x19]\n"
-    ".inst 0x4482924e  // srshl z14.s, p4/M, z14.s, z18.s\n"
-    ".inst 0x4482924d  // srshl z13.s, p4/M, z13.s, z18.s\n"
-    ".inst 0x4482924c  // srshl z12.s, p4/M, z12.s, z18.s\n"
-    ".inst 0x4482924b  // srshl z11.s, p4/M, z11.s, z18.s\n"
-    ".inst 0x04b475ef  // sqrdmulh z15.s, z15.s, z20.s\n"
-    ".inst 0x04b475ce  // sqrdmulh z14.s, z14.s, z20.s\n"
-    ".inst 0x04b475ad  // sqrdmulh z13.s, z13.s, z20.s\n"
-    ".inst 0x04b4758c  // sqrdmulh z12.s, z12.s, z20.s\n"
-    ".inst 0x04b4756b  // sqrdmulh z11.s, z11.s, z20.s\n"
-    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
-    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
-    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
-    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
+    ".inst 0x4482822f  // srshl z15.s, p0/M, z15.s, z17.s\n"
+    ".inst 0x04b07400  // sqrdmulh z0.s, z0.s, z16.s\n"
+    ".inst 0x4482822e  // srshl z14.s, p0/M, z14.s, z17.s\n"
+    ".inst 0x4482822d  // srshl z13.s, p0/M, z13.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x4482822c  // srshl z12.s, p0/M, z12.s, z17.s\n"
+    ".inst 0x4482822b  // srshl z11.s, p0/M, z11.s, z17.s\n"
     "add z15.s, z15.s, z16.s\n"
     "add z14.s, z14.s, z16.s\n"
+    ".inst 0x4482822a  // srshl z10.s, p0/M, z10.s, z17.s\n"
+    ".inst 0x44828229  // srshl z9.s, p0/M, z9.s, z17.s\n"
     "add z13.s, z13.s, z16.s\n"
     "add z12.s, z12.s, z16.s\n"
-    ".inst 0x4482922b  // srshl z11.s, p4/M, z11.s, z17.s\n"
-    ".inst 0x4482924a  // srshl z10.s, p4/M, z10.s, z18.s\n"
-    ".inst 0x44829249  // srshl z9.s, p4/M, z9.s, z18.s\n"
-    ".inst 0x44829248  // srshl z8.s, p4/M, z8.s, z18.s\n"
+    ".inst 0x44828228  // srshl z8.s, p0/M, z8.s, z17.s\n"
+    ".inst 0x44828227  // srshl z7.s, p0/M, z7.s, z17.s\n"
     "add z11.s, z11.s, z16.s\n"
-    ".inst 0x04b4754a  // sqrdmulh z10.s, z10.s, z20.s\n"
-    ".inst 0x04b47529  // sqrdmulh z9.s, z9.s, z20.s\n"
-    ".inst 0x04b47508  // sqrdmulh z8.s, z8.s, z20.s\n"
-    ".inst 0x44829247  // srshl z7.s, p4/M, z7.s, z18.s\n"
-    ".inst 0x4482922a  // srshl z10.s, p4/M, z10.s, z17.s\n"
-    ".inst 0x44829229  // srshl z9.s, p4/M, z9.s, z17.s\n"
-    ".inst 0x44829228  // srshl z8.s, p4/M, z8.s, z17.s\n"
-    ".inst 0x04b474e7  // sqrdmulh z7.s, z7.s, z20.s\n"
     "add z10.s, z10.s, z16.s\n"
+    ".inst 0x44828226  // srshl z6.s, p0/M, z6.s, z17.s\n"
+    ".inst 0x44828225  // srshl z5.s, p0/M, z5.s, z17.s\n"
     "add z9.s, z9.s, z16.s\n"
     "add z8.s, z8.s, z16.s\n"
-    ".inst 0x44829227  // srshl z7.s, p4/M, z7.s, z17.s\n"
-    ".inst 0x44829246  // srshl z6.s, p4/M, z6.s, z18.s\n"
-    ".inst 0x44829245  // srshl z5.s, p4/M, z5.s, z18.s\n"
-    ".inst 0x44829244  // srshl z4.s, p4/M, z4.s, z18.s\n"
+    ".inst 0x44828224  // srshl z4.s, p0/M, z4.s, z17.s\n"
+    ".inst 0x44828223  // srshl z3.s, p0/M, z3.s, z17.s\n"
     "add z7.s, z7.s, z16.s\n"
-    ".inst 0x04b474c6  // sqrdmulh z6.s, z6.s, z20.s\n"
-    ".inst 0x04b474a5  // sqrdmulh z5.s, z5.s, z20.s\n"
-    ".inst 0x04b47484  // sqrdmulh z4.s, z4.s, z20.s\n"
-    ".inst 0x44829243  // srshl z3.s, p4/M, z3.s, z18.s\n"
-    ".inst 0x44829226  // srshl z6.s, p4/M, z6.s, z17.s\n"
-    ".inst 0x44829225  // srshl z5.s, p4/M, z5.s, z17.s\n"
-    ".inst 0x44829224  // srshl z4.s, p4/M, z4.s, z17.s\n"
-    ".inst 0x04b47463  // sqrdmulh z3.s, z3.s, z20.s\n"
     "add z6.s, z6.s, z16.s\n"
+    ".inst 0x44828222  // srshl z2.s, p0/M, z2.s, z17.s\n"
+    ".inst 0x44828221  // srshl z1.s, p0/M, z1.s, z17.s\n"
     "add z5.s, z5.s, z16.s\n"
     "add z4.s, z4.s, z16.s\n"
-    ".inst 0x44829223  // srshl z3.s, p4/M, z3.s, z17.s\n"
-    ".inst 0x44829242  // srshl z2.s, p4/M, z2.s, z18.s\n"
-    ".inst 0x44829241  // srshl z1.s, p4/M, z1.s, z18.s\n"
-    ".inst 0x44829240  // srshl z0.s, p4/M, z0.s, z18.s\n"
+    ".inst 0x44828220  // srshl z0.s, p0/M, z0.s, z17.s\n"
     "add z3.s, z3.s, z16.s\n"
-    ".inst 0x04b47442  // sqrdmulh z2.s, z2.s, z20.s\n"
-    ".inst 0x04b47421  // sqrdmulh z1.s, z1.s, z20.s\n"
-    ".inst 0x04b47400  // sqrdmulh z0.s, z0.s, z20.s\n"
-    "smax z15.s, p4/M, z15.s, z21.s\n"
-    ".inst 0x44829222  // srshl z2.s, p4/M, z2.s, z17.s\n"
-    ".inst 0x44829221  // srshl z1.s, p4/M, z1.s, z17.s\n"
-    ".inst 0x44829220  // srshl z0.s, p4/M, z0.s, z17.s\n"
-    "smin z15.s, p4/M, z15.s, z19.s\n"
     "add z2.s, z2.s, z16.s\n"
     "add z1.s, z1.s, z16.s\n"
     "add z0.s, z0.s, z16.s\n"
-    "smax z14.s, p4/M, z14.s, z21.s\n"
-    "smax z13.s, p4/M, z13.s, z21.s\n"
-    "smax z12.s, p4/M, z12.s, z21.s\n"
-    "smax z11.s, p4/M, z11.s, z21.s\n"
-    "smin z14.s, p4/M, z14.s, z19.s\n"
-    "smin z13.s, p4/M, z13.s, z19.s\n"
-    "smin z12.s, p4/M, z12.s, z19.s\n"
-    "smin z11.s, p4/M, z11.s, z19.s\n"
+    "mov z16.s, #0x0\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "mov z18.s, #0xff\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smax z11.s, p0/M, z11.s, z16.s\n"
+    "smax z10.s, p0/M, z10.s, z16.s\n"
+    "smax z9.s, p0/M, z9.s, z16.s\n"
+    "smax z8.s, p0/M, z8.s, z16.s\n"
+    "smax z7.s, p0/M, z7.s, z16.s\n"
+    "smax z6.s, p0/M, z6.s, z16.s\n"
+    "smax z5.s, p0/M, z5.s, z16.s\n"
+    "smax z4.s, p0/M, z4.s, z16.s\n"
+    "smax z3.s, p0/M, z3.s, z16.s\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
-    "smax z10.s, p4/M, z10.s, z21.s\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
-    "smax z9.s, p4/M, z9.s, z21.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
-    "smin z10.s, p4/M, z10.s, z19.s\n"
-    "incb x26, ALL, MUL #4\n"
-    "smin z9.s, p4/M, z9.s, z19.s\n"
-    "smax z8.s, p4/M, z8.s, z21.s\n"
-    "smax z7.s, p4/M, z7.s, z21.s\n"
-    "smax z6.s, p4/M, z6.s, z21.s\n"
-    "trn1 z18.h, z11.h, z10.h\n"
-    "smin z8.s, p4/M, z8.s, z19.s\n"
-    "smin z7.s, p4/M, z7.s, z19.s\n"
-    "smin z6.s, p4/M, z6.s, z19.s\n"
-    "smax z5.s, p4/M, z5.s, z21.s\n"
+    "smin z11.s, p0/M, z11.s, z18.s\n"
+    "smin z10.s, p0/M, z10.s, z18.s\n"
+    "trn1 z17.h, z11.h, z10.h\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+    "smin z9.s, p0/M, z9.s, z18.s\n"
+    "smin z8.s, p0/M, z8.s, z18.s\n"
     "trn1 z16.h, z9.h, z8.h\n"
-    "smax z4.s, p4/M, z4.s, z21.s\n"
-    "trn1 z17.h, z7.h, z6.h\n"
-    "trn1 z16.b, z18.b, z16.b\n"
-    "st1b { z16.b }, p2, [%x[outptr], x25]\n"
-    "smin z5.s, p4/M, z5.s, z19.s\n"
-    "incb x25, ALL, MUL #4\n"
-    "smin z4.s, p4/M, z4.s, z19.s\n"
-    "smax z3.s, p4/M, z3.s, z21.s\n"
-    "smax z2.s, p4/M, z2.s, z21.s\n"
-    "smax z1.s, p4/M, z1.s, z21.s\n"
-    "smax z0.s, p4/M, z0.s, z21.s\n"
-    "trn1 z16.h, z5.h, z4.h\n"
-    "smin z3.s, p4/M, z3.s, z19.s\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p1, [%x[outptr], x24]\n"
-    "smin z2.s, p4/M, z2.s, z19.s\n"
-    "incb x24, ALL, MUL #4\n"
-    "smin z1.s, p4/M, z1.s, z19.s\n"
-    "smin z0.s, p4/M, z0.s, z19.s\n"
+    "smin z7.s, p0/M, z7.s, z18.s\n"
+    "smin z6.s, p0/M, z6.s, z18.s\n"
+    "trn1 z17.h, z7.h, z6.h\n"
+    "st1b { z16.b }, p3, [%x[outptr], x25]\n"
+    "smin z5.s, p0/M, z5.s, z18.s\n"
+    "smin z4.s, p0/M, z4.s, z18.s\n"
+    "trn1 z16.h, z5.h, z4.h\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "smin z3.s, p0/M, z3.s, z18.s\n"
+    "smin z2.s, p0/M, z2.s, z18.s\n"
     "trn1 z17.h, z3.h, z2.h\n"
+    "st1b { z16.b }, p2, [%x[outptr], x24]\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
     "trn1 z16.h, z1.h, z0.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p0, [%x[outptr], x23]\n"
+    "st1b { z16.b }, p1, [%x[outptr], x23]\n"
     "incb x23, ALL, MUL #4\n"
-    "whilelt p0.b, x23, %x[n_channels]\n"
+    "whilelt p1.b, x23, %x[n_channels]\n"
+    "incb x26, ALL, MUL #4\n"
+    "incb x25, ALL, MUL #4\n"
+    "incb x24, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "ld1rw { z15.s }, p4/Z, [%x[accumulator_init]]\n"
-    "mov z14.d, z15.d\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z13.d, z15.d\n"
+    "ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
     "lsr x22, %x[n_valid_cells], #0x1\n"
+    "mov z14.d, z15.d\n"
+    "mov z13.d, z15.d\n"
     "mov z12.d, z15.d\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x22, 11f\n"
     "ldp x21, x20, [x19, #0x0]\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    "add x19, x19, #0x10\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
     "subs x22, x22, #0x1\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 2 inputs loop
     ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
-    "ldp x21, x20, [x19, #0x0]\n"
-    "add x19, x19, #0x10\n"
     ".inst 0x455e0ff6  // uaddlt z22.h, z31.b, z30.b\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    "ldp x21, x20, [x19, #0x0]\n"
     "subs x22, x22, #0x1\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
     ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
-    "ld1b { z30.b }, p3/Z, [x20, x26]\n"
+    "add x19, x19, #0x10\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
     ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
     ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
+    "ld1b { z30.b }, p4/Z, [x20, x26]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 2 inputs tail
     ".inst 0x455e0bf7  // uaddlb z23.h, z31.b, z30.b\n"
@@ -424,56 +426,55 @@
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x21, [x19], #0x8\n"
+    "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+    ".inst 0x4508abf7  // ushllb z23.h, z31.b, #0x0\n"
+    ".inst 0x4508aff6  // ushllt z22.h, z31.b, #0x0\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z31.b }, p3/Z, [x21, x26]\n"
-    ".inst 0x4508abf1  // ushllb z17.h, z31.b, #0x0\n"
-    ".inst 0x4508aff0  // ushllt z16.h, z31.b, #0x0\n"
-    ".inst 0x459149ef  // uaddwb z15.s, z15.s, z17.h\n"
-    ".inst 0x45914dce  // uaddwt z14.s, z14.s, z17.h\n"
-    ".inst 0x459049ad  // uaddwb z13.s, z13.s, z16.h\n"
-    ".inst 0x45904d8c  // uaddwt z12.s, z12.s, z16.h\n"
+    ".inst 0x459749ef  // uaddwb z15.s, z15.s, z23.h\n"
+    ".inst 0x45974dce  // uaddwt z14.s, z14.s, z23.h\n"
+    ".inst 0x459649ad  // uaddwb z13.s, z13.s, z22.h\n"
+    ".inst 0x45964d8c  // uaddwt z12.s, z12.s, z22.h\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "mov z21.s, #0x0\n"
-    "ld1rw { z20.s }, p4/Z, [%x[combined_rescale_value]]\n"
+    "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
+    "ld1rw { z16.s }, p0/Z, [%x[combined_rescale_value]]\n"
+    ".inst 0x4482824f  // srshl z15.s, p0/M, z15.s, z18.s\n"
+    ".inst 0x4482824e  // srshl z14.s, p0/M, z14.s, z18.s\n"
+    ".inst 0x4482824d  // srshl z13.s, p0/M, z13.s, z18.s\n"
+    ".inst 0x4482824c  // srshl z12.s, p0/M, z12.s, z18.s\n"
+    "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
+    ".inst 0x04b075ef  // sqrdmulh z15.s, z15.s, z16.s\n"
+    ".inst 0x04b075ce  // sqrdmulh z14.s, z14.s, z16.s\n"
+    ".inst 0x04b075ad  // sqrdmulh z13.s, z13.s, z16.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    "mov z19.s, #0xff\n"
-    "ld1rw { z18.s }, p4/Z, [%x[left_shift]]\n"
-    "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
-    ".inst 0x4482924f  // srshl z15.s, p4/M, z15.s, z18.s\n"
-    "ld1rw { z16.s }, p4/Z, [x19]\n"
-    ".inst 0x4482924e  // srshl z14.s, p4/M, z14.s, z18.s\n"
-    ".inst 0x4482924d  // srshl z13.s, p4/M, z13.s, z18.s\n"
-    ".inst 0x4482924c  // srshl z12.s, p4/M, z12.s, z18.s\n"
-    ".inst 0x04b475ef  // sqrdmulh z15.s, z15.s, z20.s\n"
-    ".inst 0x04b475ce  // sqrdmulh z14.s, z14.s, z20.s\n"
-    ".inst 0x04b475ad  // sqrdmulh z13.s, z13.s, z20.s\n"
-    ".inst 0x04b4758c  // sqrdmulh z12.s, z12.s, z20.s\n"
-    ".inst 0x4482922f  // srshl z15.s, p4/M, z15.s, z17.s\n"
-    ".inst 0x4482922e  // srshl z14.s, p4/M, z14.s, z17.s\n"
-    ".inst 0x4482922d  // srshl z13.s, p4/M, z13.s, z17.s\n"
-    ".inst 0x4482922c  // srshl z12.s, p4/M, z12.s, z17.s\n"
+    ".inst 0x4482822f  // srshl z15.s, p0/M, z15.s, z17.s\n"
+    ".inst 0x04b0758c  // sqrdmulh z12.s, z12.s, z16.s\n"
+    ".inst 0x4482822e  // srshl z14.s, p0/M, z14.s, z17.s\n"
+    ".inst 0x4482822d  // srshl z13.s, p0/M, z13.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x4482822c  // srshl z12.s, p0/M, z12.s, z17.s\n"
     "add z15.s, z15.s, z16.s\n"
     "add z14.s, z14.s, z16.s\n"
     "add z13.s, z13.s, z16.s\n"
     "add z12.s, z12.s, z16.s\n"
-    "smax z15.s, p4/M, z15.s, z21.s\n"
-    "smax z14.s, p4/M, z14.s, z21.s\n"
-    "smax z13.s, p4/M, z13.s, z21.s\n"
-    "smax z12.s, p4/M, z12.s, z21.s\n"
-    "smin z15.s, p4/M, z15.s, z19.s\n"
-    "smin z14.s, p4/M, z14.s, z19.s\n"
-    "smin z13.s, p4/M, z13.s, z19.s\n"
-    "smin z12.s, p4/M, z12.s, z19.s\n"
+    "mov z16.s, #0x0\n"
+    "smax z15.s, p0/M, z15.s, z16.s\n"
+    "smax z14.s, p0/M, z14.s, z16.s\n"
+    "mov z18.s, #0xff\n"
+    "smax z13.s, p0/M, z13.s, z16.s\n"
+    "smax z12.s, p0/M, z12.s, z16.s\n"
+    "smin z15.s, p0/M, z15.s, z18.s\n"
+    "smin z14.s, p0/M, z14.s, z18.s\n"
     "trn1 z17.h, z15.h, z14.h\n"
+    "smin z13.s, p0/M, z13.s, z18.s\n"
+    "smin z12.s, p0/M, z12.s, z18.s\n"
     "trn1 z16.h, z13.h, z12.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+    "st1b { z16.b }, p4, [%x[outptr], x26]\n"
     "incb x26\n"
-    "whilelt p3.b, x26, %x[n_channels]\n"
+    "whilelt p4.b, x26, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [outptr] "r" (outptr), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
     : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -483,4 +484,4 @@
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
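
The tail of the hunk above is the fixed-point requantisation for the quantised average-pooling kernel: each 32-bit accumulator is shifted by [left_shift], scaled by [combined_rescale_value] with SQRDMULH, shifted back by [right_shift], offset by the output offset and clamped to [0, 255] before being narrowed and stored. The scalar model below is purely illustrative — it is not part of this patch and it ignores the SQRDMULH saturation corner case — but it is one way to read what the SVE sequence computes per element:

#include <algorithm>
#include <cstdint>

// Illustrative scalar sketch of the per-element requantisation above.
static inline uint8_t requantize_avg(int32_t acc, int32_t left_shift,
                                     int32_t combined_rescale_value,
                                     int32_t right_shift, int32_t output_offset)
{
  // SRSHL by a non-negative amount is a plain left shift.
  int32_t v = static_cast<int32_t>(static_cast<int64_t>(acc) << left_shift);

  // SQRDMULH: rounding doubling multiply, keeping the high 32 bits.
  v = static_cast<int32_t>((2 * static_cast<int64_t>(v) * combined_rescale_value + (1LL << 30)) >> 31);

  // SRSHL by a negative amount behaves as a rounding arithmetic shift right.
  const int32_t shift = -right_shift;
  if (shift > 0)
  {
    v = (v + (1 << (shift - 1))) >> shift;
  }

  v += output_offset;
  return static_cast<uint8_t>(std::min(255, std::max(0, v)));
}
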
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp
index 0d04ae5..eece6c0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,29 +26,21 @@
 
 #pragma once
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
 
 void sve_u8q_nhwc_max_generic_depthfirst_impl(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
 
-struct sve_u8q_nhwc_max_generic_depthfirst
+struct sve_u8q_nhwc_max_generic_depthfirst : IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>
 {
-  typedef uint8_t operand_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint64_t, const uint64_t n_valid_cells, uint64_t n_channels, const uint8_t *const *const inptrs, uint8_t *outptr, const Requantize32 &qp);
-
-  constexpr static PoolingType pooling_type(void) { return PoolingType::MAX; }
-
-
-  kern_type kernel = sve_u8q_nhwc_max_generic_depthfirst_impl;
-
+  using Parent = IGenericDepthfirstStrategy<uint8_t, uint8_t, Requantize32>;
   sve_u8q_nhwc_max_generic_depthfirst(const CPUInfo *) {}
+  typename Parent::KernelType get_kernel(void) const override { return sve_u8q_nhwc_max_generic_depthfirst_impl; }
 };
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
index b632af9..26d2152 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,8 +24,9 @@
 
 #include "pooling.hpp"
 #include <cstdint>
+#include <cstddef>
 
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace pooling {
@@ -41,376 +42,375 @@
 )
 {
   __asm__ __volatile__(
-    "ptrue p4.b\n"
     "mov x28, #0x0\n"
     "cntb x27\n"
     "cntb x26, ALL, MUL #2\n"
     "cntb x25, ALL, MUL #3\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
-    "whilelt p2.b, x27, %x[n_channels]\n"
-    "whilelt p1.b, x26, %x[n_channels]\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
+    "whilelt p3.b, x27, %x[n_channels]\n"
+    "whilelt p2.b, x26, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "ptrue p0.b\n"
     "b.none 7f\n"
     "1:"  // 4-vectors of channels
-    "mov z10.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
-    "mov z9.b, #0x0\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
     "mov z8.b, #0x0\n"
     "mov z7.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
+    "mov z6.b, #0x0\n"
+    "mov z5.b, #0x0\n"
     "cbz x24, 4f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "beq 3f\n"
     "2:"  // 4-vectors of channels: 4 inputs loop
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
+    "umax z22.b, p0/M, z22.b, z30.b\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
+    "umax z21.b, p0/M, z21.b, z27.b\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+    "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+    "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+    "subs x24, x24, #0x1\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
+    "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+    "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+    "umax z7.b, p0/M, z7.b, z18.b\n"
+    "umax z6.b, p0/M, z6.b, z17.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+    "umax z5.b, p0/M, z5.b, z16.b\n"
     "add x19, x19, #0x20\n"
-    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "umax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "umax z21.b, p4/M, z21.b, z26.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "umax z16.b, p4/M, z16.b, z25.b\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
-    "umax z20.b, p4/M, z20.b, z24.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z30.b }, p2/Z, [x22, x27]\n"
-    "umax z18.b, p4/M, z18.b, z22.b\n"
-    "ld1b { z22.b }, p2/Z, [x21, x27]\n"
-    "umax z17.b, p4/M, z17.b, z21.b\n"
-    "ld1b { z29.b }, p2/Z, [x20, x27]\n"
-    "umax z16.b, p4/M, z16.b, z20.b\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "umax z10.b, p4/M, z10.b, z19.b\n"
-    "ld1b { z27.b }, p1/Z, [x22, x26]\n"
-    "umax z9.b, p4/M, z9.b, z18.b\n"
-    "ld1b { z21.b }, p1/Z, [x21, x26]\n"
-    "umax z8.b, p4/M, z8.b, z17.b\n"
-    "ld1b { z26.b }, p1/Z, [x20, x26]\n"
-    "umax z7.b, p4/M, z7.b, z16.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "ld1b { z25.b }, p0/Z, [x22, x25]\n"
-    "ld1b { z20.b }, p0/Z, [x21, x25]\n"
-    "ld1b { z24.b }, p0/Z, [x20, x25]\n"
+    "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+    "ld1b { z24.b }, p1/Z, [x20, x25]\n"
     "bgt 2b\n"
     "3:"  // 4-vectors of channels: 4 inputs tail
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
-    "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
-    "umax z22.b, p4/M, z22.b, z29.b\n"
-    "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
-    "umax z21.b, p4/M, z21.b, z26.b\n"
-    "umax z16.b, p4/M, z16.b, z25.b\n"
-    "umax z20.b, p4/M, z20.b, z24.b\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "umax z18.b, p4/M, z18.b, z22.b\n"
-    "umax z17.b, p4/M, z17.b, z21.b\n"
-    "umax z16.b, p4/M, z16.b, z20.b\n"
-    "umax z10.b, p4/M, z10.b, z19.b\n"
-    "umax z9.b, p4/M, z9.b, z18.b\n"
-    "umax z8.b, p4/M, z8.b, z17.b\n"
-    "umax z7.b, p4/M, z7.b, z16.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
+    "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
+    "umax z22.b, p0/M, z22.b, z30.b\n"
+    "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
+    "umax z21.b, p0/M, z21.b, z27.b\n"
+    "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
+    "umax z20.b, p0/M, z20.b, z24.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z18.b, p0/M, z18.b, z22.b\n"
+    "umax z17.b, p0/M, z17.b, z21.b\n"
+    "umax z16.b, p0/M, z16.b, z20.b\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
+    "umax z7.b, p0/M, z7.b, z18.b\n"
+    "umax z6.b, p0/M, z6.b, z17.b\n"
+    "umax z5.b, p0/M, z5.b, z16.b\n"
     "4:"  // 4-vectors of channels: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 6f\n"
     "5:"  // 4-vectors of channels: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "umax z10.b, p4/M, z10.b, z3.b\n"
-    "ld1b { z31.b }, p2/Z, [x23, x27]\n"
-    "ld1b { z28.b }, p1/Z, [x23, x26]\n"
-    "umax z9.b, p4/M, z9.b, z31.b\n"
-    "ld1b { z16.b }, p0/Z, [x23, x25]\n"
-    "umax z8.b, p4/M, z8.b, z28.b\n"
-    "umax z7.b, p4/M, z7.b, z16.b\n"
+    "umax z8.b, p0/M, z8.b, z4.b\n"
+    "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+    "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+    "umax z7.b, p0/M, z7.b, z0.b\n"
+    "umax z6.b, p0/M, z6.b, z29.b\n"
+    "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+    "umax z5.b, p0/M, z5.b, z26.b\n"
     "bgt 5b\n"
     "6:"  // 4-vectors of channels: Single input loop: End
-    "mov z6.s, #0x0\n"
     "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
-    "ld1rw { z5.s }, p4/Z, [x19]\n"
-    "mov z4.s, #0xff\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    ".inst 0x4508a951  // ushllb z17.h, z10.b, #0x0\n"
-    "ld1rw { z3.s }, p4/Z, [x19]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a918  // ushllb z24.h, z8.b, #0x0\n"
+    ".inst 0x4508ad17  // ushllt z23.h, z8.b, #0x0\n"
+    ".inst 0x4508a8f6  // ushllb z22.h, z7.b, #0x0\n"
+    ".inst 0x4508acf5  // ushllt z21.h, z7.b, #0x0\n"
+    "neg z4.s, p0/M, z4.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    ".inst 0x4508ad50  // ushllt z16.h, z10.b, #0x0\n"
-    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    ".inst 0x4508a8d4  // ushllb z20.h, z6.b, #0x0\n"
+    ".inst 0x4508acd3  // ushllt z19.h, z6.b, #0x0\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    ".inst 0x4508a8b2  // ushllb z18.h, z5.b, #0x0\n"
+    ".inst 0x4508acb1  // ushllt z17.h, z5.b, #0x0\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    ".inst 0x4508a937  // ushllb z23.h, z9.b, #0x0\n"
-    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    ".inst 0x45984082  // saddwb z2.s, z4.s, z24.h\n"
+    ".inst 0x45984481  // saddwt z1.s, z4.s, z24.h\n"
+    ".inst 0x44828062  // srshl z2.s, p0/M, z2.s, z3.s\n"
+    ".inst 0x44828061  // srshl z1.s, p0/M, z1.s, z3.s\n"
+    ".inst 0x45974080  // saddwb z0.s, z4.s, z23.h\n"
+    ".inst 0x4597449f  // saddwt z31.s, z4.s, z23.h\n"
+    ".inst 0x44828060  // srshl z0.s, p0/M, z0.s, z3.s\n"
+    ".inst 0x4482807f  // srshl z31.s, p0/M, z31.s, z3.s\n"
+    ".inst 0x4596409e  // saddwb z30.s, z4.s, z22.h\n"
+    ".inst 0x4596449d  // saddwt z29.s, z4.s, z22.h\n"
+    ".inst 0x4482807e  // srshl z30.s, p0/M, z30.s, z3.s\n"
+    ".inst 0x4482807d  // srshl z29.s, p0/M, z29.s, z3.s\n"
+    ".inst 0x4595409c  // saddwb z28.s, z4.s, z21.h\n"
+    ".inst 0x4595449b  // saddwt z27.s, z4.s, z21.h\n"
+    ".inst 0x4482807c  // srshl z28.s, p0/M, z28.s, z3.s\n"
+    ".inst 0x4482807b  // srshl z27.s, p0/M, z27.s, z3.s\n"
+    ".inst 0x4594409a  // saddwb z26.s, z4.s, z20.h\n"
+    ".inst 0x45944499  // saddwt z25.s, z4.s, z20.h\n"
+    ".inst 0x4482807a  // srshl z26.s, p0/M, z26.s, z3.s\n"
+    ".inst 0x44828079  // srshl z25.s, p0/M, z25.s, z3.s\n"
+    ".inst 0x45934098  // saddwb z24.s, z4.s, z19.h\n"
+    ".inst 0x45934497  // saddwt z23.s, z4.s, z19.h\n"
+    ".inst 0x44828078  // srshl z24.s, p0/M, z24.s, z3.s\n"
+    ".inst 0x44828077  // srshl z23.s, p0/M, z23.s, z3.s\n"
+    ".inst 0x45924096  // saddwb z22.s, z4.s, z18.h\n"
+    ".inst 0x45924495  // saddwt z21.s, z4.s, z18.h\n"
+    ".inst 0x44828076  // srshl z22.s, p0/M, z22.s, z3.s\n"
+    ".inst 0x44828075  // srshl z21.s, p0/M, z21.s, z3.s\n"
+    ".inst 0x45914094  // saddwb z20.s, z4.s, z17.h\n"
+    ".inst 0x45914493  // saddwt z19.s, z4.s, z17.h\n"
+    ".inst 0x44828074  // srshl z20.s, p0/M, z20.s, z3.s\n"
+    ".inst 0x44828073  // srshl z19.s, p0/M, z19.s, z3.s\n"
+    "ld1rw { z17.s }, p0/Z, [x19]\n"
+    ".inst 0x04b07442  // sqrdmulh z2.s, z2.s, z16.s\n"
+    ".inst 0x04b07421  // sqrdmulh z1.s, z1.s, z16.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    ".inst 0x4508ad36  // ushllt z22.h, z9.b, #0x0\n"
-    "ld1rw { z0.s }, p4/Z, [x19]\n"
-    ".inst 0x4508a912  // ushllb z18.h, z8.b, #0x0\n"
-    ".inst 0x4508ad15  // ushllt z21.h, z8.b, #0x0\n"
-    ".inst 0x4508a8f4  // ushllb z20.h, z7.b, #0x0\n"
-    ".inst 0x4508acf3  // ushllt z19.h, z7.b, #0x0\n"
-    "neg z5.s, p4/M, z5.s\n"
-    ".inst 0x459140bf  // saddwb z31.s, z5.s, z17.h\n"
-    ".inst 0x459144b1  // saddwt z17.s, z5.s, z17.h\n"
-    ".inst 0x459040be  // saddwb z30.s, z5.s, z16.h\n"
-    ".inst 0x459044b0  // saddwt z16.s, z5.s, z16.h\n"
-    ".inst 0x459740bd  // saddwb z29.s, z5.s, z23.h\n"
-    ".inst 0x459744bc  // saddwt z28.s, z5.s, z23.h\n"
-    ".inst 0x459640bb  // saddwb z27.s, z5.s, z22.h\n"
-    ".inst 0x459644ba  // saddwt z26.s, z5.s, z22.h\n"
-    ".inst 0x459240b9  // saddwb z25.s, z5.s, z18.h\n"
-    ".inst 0x459244b2  // saddwt z18.s, z5.s, z18.h\n"
-    ".inst 0x459540b8  // saddwb z24.s, z5.s, z21.h\n"
-    ".inst 0x459544b7  // saddwt z23.s, z5.s, z21.h\n"
-    ".inst 0x459440b6  // saddwb z22.s, z5.s, z20.h\n"
-    ".inst 0x459444b5  // saddwt z21.s, z5.s, z20.h\n"
-    ".inst 0x459340b4  // saddwb z20.s, z5.s, z19.h\n"
-    ".inst 0x459344b3  // saddwt z19.s, z5.s, z19.h\n"
-    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"
-    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
-    ".inst 0x4482905e  // srshl z30.s, p4/M, z30.s, z2.s\n"
-    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
-    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
-    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
-    ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
-    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
-    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
-    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
-    ".inst 0x4482903e  // srshl z30.s, p4/M, z30.s, z1.s\n"
-    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
-    "add z31.s, z31.s, z0.s\n"
-    "add z17.s, z17.s, z0.s\n"
-    "add z30.s, z30.s, z0.s\n"
-    "add z16.s, z16.s, z0.s\n"
-    ".inst 0x4482905d  // srshl z29.s, p4/M, z29.s, z2.s\n"
-    ".inst 0x4482905c  // srshl z28.s, p4/M, z28.s, z2.s\n"
-    ".inst 0x4482905b  // srshl z27.s, p4/M, z27.s, z2.s\n"
-    ".inst 0x4482905a  // srshl z26.s, p4/M, z26.s, z2.s\n"
-    ".inst 0x04a377bd  // sqrdmulh z29.s, z29.s, z3.s\n"
-    ".inst 0x04a3779c  // sqrdmulh z28.s, z28.s, z3.s\n"
-    ".inst 0x04a3777b  // sqrdmulh z27.s, z27.s, z3.s\n"
-    ".inst 0x04a3775a  // sqrdmulh z26.s, z26.s, z3.s\n"
-    ".inst 0x4482903d  // srshl z29.s, p4/M, z29.s, z1.s\n"
-    ".inst 0x4482903c  // srshl z28.s, p4/M, z28.s, z1.s\n"
-    ".inst 0x4482903b  // srshl z27.s, p4/M, z27.s, z1.s\n"
-    ".inst 0x4482903a  // srshl z26.s, p4/M, z26.s, z1.s\n"
-    "add z29.s, z29.s, z0.s\n"
-    "add z28.s, z28.s, z0.s\n"
-    "add z27.s, z27.s, z0.s\n"
-    "add z26.s, z26.s, z0.s\n"
-    ".inst 0x44829059  // srshl z25.s, p4/M, z25.s, z2.s\n"
-    ".inst 0x44829052  // srshl z18.s, p4/M, z18.s, z2.s\n"
-    "smax z31.s, p4/M, z31.s, z6.s\n"
-    "smax z17.s, p4/M, z17.s, z6.s\n"
-    ".inst 0x04a37739  // sqrdmulh z25.s, z25.s, z3.s\n"
-    ".inst 0x04a37652  // sqrdmulh z18.s, z18.s, z3.s\n"
-    "smin z31.s, p4/M, z31.s, z4.s\n"
-    "smin z17.s, p4/M, z17.s, z4.s\n"
-    ".inst 0x44829039  // srshl z25.s, p4/M, z25.s, z1.s\n"
-    ".inst 0x44829032  // srshl z18.s, p4/M, z18.s, z1.s\n"
-    "smax z30.s, p4/M, z30.s, z6.s\n"
-    "trn1 z17.h, z31.h, z17.h\n"
-    "add z25.s, z25.s, z0.s\n"
-    "add z18.s, z18.s, z0.s\n"
-    ".inst 0x44829058  // srshl z24.s, p4/M, z24.s, z2.s\n"
-    ".inst 0x44829057  // srshl z23.s, p4/M, z23.s, z2.s\n"
-    "smin z30.s, p4/M, z30.s, z4.s\n"
-    "smax z16.s, p4/M, z16.s, z6.s\n"
-    ".inst 0x04a37718  // sqrdmulh z24.s, z24.s, z3.s\n"
-    ".inst 0x04a376f7  // sqrdmulh z23.s, z23.s, z3.s\n"
-    "smax z29.s, p4/M, z29.s, z6.s\n"
-    "smin z16.s, p4/M, z16.s, z4.s\n"
-    ".inst 0x44829038  // srshl z24.s, p4/M, z24.s, z1.s\n"
-    ".inst 0x44829037  // srshl z23.s, p4/M, z23.s, z1.s\n"
-    "smin z29.s, p4/M, z29.s, z4.s\n"
-    "trn1 z16.h, z30.h, z16.h\n"
-    "add z24.s, z24.s, z0.s\n"
-    "add z23.s, z23.s, z0.s\n"
+    ".inst 0x04b07400  // sqrdmulh z0.s, z0.s, z16.s\n"
+    ".inst 0x04b077ff  // sqrdmulh z31.s, z31.s, z16.s\n"
+    ".inst 0x44828222  // srshl z2.s, p0/M, z2.s, z17.s\n"
+    ".inst 0x44828221  // srshl z1.s, p0/M, z1.s, z17.s\n"
+    ".inst 0x04b077de  // sqrdmulh z30.s, z30.s, z16.s\n"
+    ".inst 0x04b077bd  // sqrdmulh z29.s, z29.s, z16.s\n"
+    ".inst 0x44828220  // srshl z0.s, p0/M, z0.s, z17.s\n"
+    ".inst 0x4482823f  // srshl z31.s, p0/M, z31.s, z17.s\n"
+    ".inst 0x04b0779c  // sqrdmulh z28.s, z28.s, z16.s\n"
+    ".inst 0x04b0777b  // sqrdmulh z27.s, z27.s, z16.s\n"
+    ".inst 0x4482823e  // srshl z30.s, p0/M, z30.s, z17.s\n"
+    ".inst 0x4482823d  // srshl z29.s, p0/M, z29.s, z17.s\n"
+    ".inst 0x04b0775a  // sqrdmulh z26.s, z26.s, z16.s\n"
+    ".inst 0x04b07739  // sqrdmulh z25.s, z25.s, z16.s\n"
+    ".inst 0x4482823c  // srshl z28.s, p0/M, z28.s, z17.s\n"
+    ".inst 0x4482823b  // srshl z27.s, p0/M, z27.s, z17.s\n"
+    ".inst 0x04b07718  // sqrdmulh z24.s, z24.s, z16.s\n"
+    ".inst 0x04b076f7  // sqrdmulh z23.s, z23.s, z16.s\n"
+    ".inst 0x4482823a  // srshl z26.s, p0/M, z26.s, z17.s\n"
+    ".inst 0x44828239  // srshl z25.s, p0/M, z25.s, z17.s\n"
+    ".inst 0x04b076d6  // sqrdmulh z22.s, z22.s, z16.s\n"
+    ".inst 0x04b076b5  // sqrdmulh z21.s, z21.s, z16.s\n"
+    ".inst 0x44828238  // srshl z24.s, p0/M, z24.s, z17.s\n"
+    ".inst 0x44828237  // srshl z23.s, p0/M, z23.s, z17.s\n"
+    ".inst 0x04b07694  // sqrdmulh z20.s, z20.s, z16.s\n"
+    ".inst 0x04b07673  // sqrdmulh z19.s, z19.s, z16.s\n"
+    ".inst 0x44828236  // srshl z22.s, p0/M, z22.s, z17.s\n"
+    ".inst 0x44828235  // srshl z21.s, p0/M, z21.s, z17.s\n"
+    ".inst 0x44828234  // srshl z20.s, p0/M, z20.s, z17.s\n"
+    ".inst 0x44828233  // srshl z19.s, p0/M, z19.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    "add z2.s, z2.s, z16.s\n"
+    "add z1.s, z1.s, z16.s\n"
+    "add z0.s, z0.s, z16.s\n"
+    "add z31.s, z31.s, z16.s\n"
+    "add z30.s, z30.s, z16.s\n"
+    "add z29.s, z29.s, z16.s\n"
+    "add z28.s, z28.s, z16.s\n"
+    "add z27.s, z27.s, z16.s\n"
+    "add z26.s, z26.s, z16.s\n"
+    "add z25.s, z25.s, z16.s\n"
+    "add z24.s, z24.s, z16.s\n"
+    "add z23.s, z23.s, z16.s\n"
+    "add z22.s, z22.s, z16.s\n"
+    "add z21.s, z21.s, z16.s\n"
+    "add z20.s, z20.s, z16.s\n"
+    "add z19.s, z19.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "mov z18.s, #0xff\n"
+    "smax z30.s, p0/M, z30.s, z16.s\n"
+    "smax z29.s, p0/M, z29.s, z16.s\n"
+    "smax z28.s, p0/M, z28.s, z16.s\n"
+    "smax z27.s, p0/M, z27.s, z16.s\n"
+    "smax z26.s, p0/M, z26.s, z16.s\n"
+    "smax z25.s, p0/M, z25.s, z16.s\n"
+    "smax z24.s, p0/M, z24.s, z16.s\n"
+    "smax z23.s, p0/M, z23.s, z16.s\n"
+    "smax z22.s, p0/M, z22.s, z16.s\n"
+    "smax z21.s, p0/M, z21.s, z16.s\n"
+    "smax z20.s, p0/M, z20.s, z16.s\n"
+    "smax z19.s, p0/M, z19.s, z16.s\n"
+    "smin z2.s, p0/M, z2.s, z18.s\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
+    "trn1 z17.h, z2.h, z1.h\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
+    "smin z31.s, p0/M, z31.s, z18.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
-    ".inst 0x44829056  // srshl z22.s, p4/M, z22.s, z2.s\n"
-    "incb x28, ALL, MUL #4\n"
-    ".inst 0x44829055  // srshl z21.s, p4/M, z21.s, z2.s\n"
-    ".inst 0x44829054  // srshl z20.s, p4/M, z20.s, z2.s\n"
-    ".inst 0x44829053  // srshl z19.s, p4/M, z19.s, z2.s\n"
-    "smax z28.s, p4/M, z28.s, z6.s\n"
-    ".inst 0x04a376d6  // sqrdmulh z22.s, z22.s, z3.s\n"
-    ".inst 0x04a376b5  // sqrdmulh z21.s, z21.s, z3.s\n"
-    ".inst 0x04a37694  // sqrdmulh z20.s, z20.s, z3.s\n"
-    ".inst 0x04a37673  // sqrdmulh z19.s, z19.s, z3.s\n"
-    ".inst 0x44829036  // srshl z22.s, p4/M, z22.s, z1.s\n"
-    ".inst 0x44829035  // srshl z21.s, p4/M, z21.s, z1.s\n"
-    ".inst 0x44829034  // srshl z20.s, p4/M, z20.s, z1.s\n"
-    ".inst 0x44829033  // srshl z19.s, p4/M, z19.s, z1.s\n"
-    "add z22.s, z22.s, z0.s\n"
-    "add z21.s, z21.s, z0.s\n"
-    "add z20.s, z20.s, z0.s\n"
-    "add z19.s, z19.s, z0.s\n"
-    "smax z27.s, p4/M, z27.s, z6.s\n"
-    "smax z26.s, p4/M, z26.s, z6.s\n"
-    "smax z25.s, p4/M, z25.s, z6.s\n"
-    "smin z28.s, p4/M, z28.s, z4.s\n"
-    "smin z27.s, p4/M, z27.s, z4.s\n"
-    "smin z26.s, p4/M, z26.s, z4.s\n"
-    "smin z25.s, p4/M, z25.s, z4.s\n"
-    "trn1 z17.h, z29.h, z28.h\n"
-    "smax z18.s, p4/M, z18.s, z6.s\n"
-    "trn1 z16.h, z27.h, z26.h\n"
-    "smax z24.s, p4/M, z24.s, z6.s\n"
+    "smin z30.s, p0/M, z30.s, z18.s\n"
+    "smin z29.s, p0/M, z29.s, z18.s\n"
+    "trn1 z17.h, z30.h, z29.h\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+    "smin z28.s, p0/M, z28.s, z18.s\n"
+    "smin z27.s, p0/M, z27.s, z18.s\n"
+    "trn1 z16.h, z28.h, z27.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p2, [%x[outptr], x27]\n"
-    "smin z18.s, p4/M, z18.s, z4.s\n"
-    "incb x27, ALL, MUL #4\n"
-    "smin z24.s, p4/M, z24.s, z4.s\n"
-    "smax z23.s, p4/M, z23.s, z6.s\n"
-    "smax z22.s, p4/M, z22.s, z6.s\n"
-    "smax z21.s, p4/M, z21.s, z6.s\n"
-    "trn1 z18.h, z25.h, z18.h\n"
-    "smin z23.s, p4/M, z23.s, z4.s\n"
-    "smin z22.s, p4/M, z22.s, z4.s\n"
-    "smin z21.s, p4/M, z21.s, z4.s\n"
-    "smax z20.s, p4/M, z20.s, z6.s\n"
+    "smin z26.s, p0/M, z26.s, z18.s\n"
+    "smin z25.s, p0/M, z25.s, z18.s\n"
+    "trn1 z17.h, z26.h, z25.h\n"
+    "st1b { z16.b }, p3, [%x[outptr], x27]\n"
+    "smin z24.s, p0/M, z24.s, z18.s\n"
+    "smin z23.s, p0/M, z23.s, z18.s\n"
     "trn1 z16.h, z24.h, z23.h\n"
-    "smax z19.s, p4/M, z19.s, z6.s\n"
+    "trn1 z16.b, z17.b, z16.b\n"
+    "smin z22.s, p0/M, z22.s, z18.s\n"
+    "smin z21.s, p0/M, z21.s, z18.s\n"
     "trn1 z17.h, z22.h, z21.h\n"
-    "trn1 z16.b, z18.b, z16.b\n"
-    "st1b { z16.b }, p1, [%x[outptr], x26]\n"
-    "smin z20.s, p4/M, z20.s, z4.s\n"
-    "incb x26, ALL, MUL #4\n"
-    "smin z19.s, p4/M, z19.s, z4.s\n"
+    "st1b { z16.b }, p2, [%x[outptr], x26]\n"
+    "smin z20.s, p0/M, z20.s, z18.s\n"
+    "smin z19.s, p0/M, z19.s, z18.s\n"
     "trn1 z16.h, z20.h, z19.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p0, [%x[outptr], x25]\n"
+    "st1b { z16.b }, p1, [%x[outptr], x25]\n"
     "incb x25, ALL, MUL #4\n"
-    "whilelt p0.b, x25, %x[n_channels]\n"
+    "whilelt p1.b, x25, %x[n_channels]\n"
+    "incb x28, ALL, MUL #4\n"
+    "incb x27, ALL, MUL #4\n"
+    "incb x26, ALL, MUL #4\n"
     "b.any 1b\n"
     "7:"  // Single vector of channels
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.none 14f\n"
     "8:"  // Single vector of channels: Loop
-    "mov z10.b, #0x0\n"
-    "mov x19, %x[inptrs]\n"
     "lsr x24, %x[n_valid_cells], #0x2\n"
+    "mov z8.b, #0x0\n"
+    "mov x19, %x[inptrs]\n"
     "cbz x24, 11f\n"
     "ldp x23, x22, [x19, #0x0]\n"
     "ldp x21, x20, [x19, #0x10]\n"
-    "add x19, x19, #0x20\n"
     "subs x24, x24, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "add x19, x19, #0x20\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "beq 10f\n"
     "9:"  // Single vector of channels: Loop: 4 inputs loop
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
     "ldp x23, x22, [x19, #0x0]\n"
-    "subs x24, x24, #0x1\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
     "ldp x21, x20, [x19, #0x10]\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "subs x24, x24, #0x1\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+    "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
     "add x19, x19, #0x20\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "ld1b { z2.b }, p3/Z, [x22, x28]\n"
-    "umax z10.b, p4/M, z10.b, z19.b\n"
-    "ld1b { z1.b }, p3/Z, [x21, x28]\n"
-    "ld1b { z0.b }, p3/Z, [x20, x28]\n"
+    "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+    "ld1b { z1.b }, p4/Z, [x20, x28]\n"
     "bgt 9b\n"
     "10:"  // Single vector of channels: Loop: 4 inputs tail
-    "movprfx z19, z3\n umax z19.b, p4/M, z19.b, z2.b\n"
-    "movprfx z23, z1\n umax z23.b, p4/M, z23.b, z0.b\n"
-    "umax z19.b, p4/M, z19.b, z23.b\n"
-    "umax z10.b, p4/M, z10.b, z19.b\n"
+    "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
+    "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
+    "umax z19.b, p0/M, z19.b, z23.b\n"
+    "umax z8.b, p0/M, z8.b, z19.b\n"
     "11:"  // Single vector of channels: Loop: After loop
     "ands x20, %x[n_valid_cells], #0x3\n"
     "beq 13f\n"
     "12:"  // Single vector of channels: Loop: Single input loop
     "ldr x23, [x19], #0x8\n"
+    "ld1b { z4.b }, p4/Z, [x23, x28]\n"
     "subs x20, x20, #0x1\n"
-    "ld1b { z3.b }, p3/Z, [x23, x28]\n"
-    "umax z10.b, p4/M, z10.b, z3.b\n"
+    "umax z8.b, p0/M, z8.b, z4.b\n"
     "bgt 12b\n"
     "13:"  // Single vector of channels: Loop: Single input loop: End
-    "mov z6.s, #0x0\n"
     "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
-    "ld1rw { z5.s }, p4/Z, [x19]\n"
-    "mov z4.s, #0xff\n"
-    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
-    ".inst 0x4508a951  // ushllb z17.h, z10.b, #0x0\n"
-    "ld1rw { z3.s }, p4/Z, [x19]\n"
+    "ld1rw { z4.s }, p0/Z, [x19]\n"
+    ".inst 0x4508a918  // ushllb z24.h, z8.b, #0x0\n"
+    ".inst 0x4508ad17  // ushllt z23.h, z8.b, #0x0\n"
+    "neg z4.s, p0/M, z4.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
-    ".inst 0x4508ad50  // ushllt z16.h, z10.b, #0x0\n"
-    "ld1rw { z2.s }, p4/Z, [x19]\n"
+    ".inst 0x45984082  // saddwb z2.s, z4.s, z24.h\n"
+    ".inst 0x45984481  // saddwt z1.s, z4.s, z24.h\n"
+    ".inst 0x45974080  // saddwb z0.s, z4.s, z23.h\n"
+    ".inst 0x4597449f  // saddwt z31.s, z4.s, z23.h\n"
+    "ld1rw { z3.s }, p0/Z, [x19]\n"
+    "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    ".inst 0x44828062  // srshl z2.s, p0/M, z2.s, z3.s\n"
+    ".inst 0x44828061  // srshl z1.s, p0/M, z1.s, z3.s\n"
+    ".inst 0x04b07442  // sqrdmulh z2.s, z2.s, z16.s\n"
+    ".inst 0x44828060  // srshl z0.s, p0/M, z0.s, z3.s\n"
+    ".inst 0x4482807f  // srshl z31.s, p0/M, z31.s, z3.s\n"
+    ".inst 0x04b07421  // sqrdmulh z1.s, z1.s, z16.s\n"
+    ".inst 0x04b07400  // sqrdmulh z0.s, z0.s, z16.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
-    "neg z5.s, p4/M, z5.s\n"
-    "ld1rw { z1.s }, p4/Z, [x19]\n"
+    "ld1rw { z17.s }, p0/Z, [x19]\n"
+    ".inst 0x04b077ff  // sqrdmulh z31.s, z31.s, z16.s\n"
     "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
-    ".inst 0x459140bf  // saddwb z31.s, z5.s, z17.h\n"
-    "ld1rw { z0.s }, p4/Z, [x19]\n"
-    ".inst 0x459144b1  // saddwt z17.s, z5.s, z17.h\n"
-    ".inst 0x459040be  // saddwb z30.s, z5.s, z16.h\n"
-    ".inst 0x459044b0  // saddwt z16.s, z5.s, z16.h\n"
-    ".inst 0x4482905f  // srshl z31.s, p4/M, z31.s, z2.s\n"
-    ".inst 0x44829051  // srshl z17.s, p4/M, z17.s, z2.s\n"
-    ".inst 0x4482905e  // srshl z30.s, p4/M, z30.s, z2.s\n"
-    ".inst 0x44829050  // srshl z16.s, p4/M, z16.s, z2.s\n"
-    ".inst 0x04a377ff  // sqrdmulh z31.s, z31.s, z3.s\n"
-    ".inst 0x04a37631  // sqrdmulh z17.s, z17.s, z3.s\n"
-    ".inst 0x04a377de  // sqrdmulh z30.s, z30.s, z3.s\n"
-    ".inst 0x04a37610  // sqrdmulh z16.s, z16.s, z3.s\n"
-    ".inst 0x4482903f  // srshl z31.s, p4/M, z31.s, z1.s\n"
-    ".inst 0x44829031  // srshl z17.s, p4/M, z17.s, z1.s\n"
-    ".inst 0x4482903e  // srshl z30.s, p4/M, z30.s, z1.s\n"
-    ".inst 0x44829030  // srshl z16.s, p4/M, z16.s, z1.s\n"
-    "add z31.s, z31.s, z0.s\n"
-    "add z17.s, z17.s, z0.s\n"
-    "add z30.s, z30.s, z0.s\n"
-    "add z16.s, z16.s, z0.s\n"
-    "smax z31.s, p4/M, z31.s, z6.s\n"
-    "smax z17.s, p4/M, z17.s, z6.s\n"
-    "smax z30.s, p4/M, z30.s, z6.s\n"
-    "smax z16.s, p4/M, z16.s, z6.s\n"
-    "smin z31.s, p4/M, z31.s, z4.s\n"
-    "smin z17.s, p4/M, z17.s, z4.s\n"
-    "smin z30.s, p4/M, z30.s, z4.s\n"
-    "smin z16.s, p4/M, z16.s, z4.s\n"
-    "trn1 z17.h, z31.h, z17.h\n"
-    "trn1 z16.h, z30.h, z16.h\n"
+    ".inst 0x44828222  // srshl z2.s, p0/M, z2.s, z17.s\n"
+    ".inst 0x44828221  // srshl z1.s, p0/M, z1.s, z17.s\n"
+    "ld1rw { z16.s }, p0/Z, [x19]\n"
+    "add z2.s, z2.s, z16.s\n"
+    ".inst 0x44828220  // srshl z0.s, p0/M, z0.s, z17.s\n"
+    ".inst 0x4482823f  // srshl z31.s, p0/M, z31.s, z17.s\n"
+    "add z1.s, z1.s, z16.s\n"
+    "add z0.s, z0.s, z16.s\n"
+    "add z31.s, z31.s, z16.s\n"
+    "mov z16.s, #0x0\n"
+    "smax z2.s, p0/M, z2.s, z16.s\n"
+    "smax z1.s, p0/M, z1.s, z16.s\n"
+    "smax z0.s, p0/M, z0.s, z16.s\n"
+    "smax z31.s, p0/M, z31.s, z16.s\n"
+    "mov z18.s, #0xff\n"
+    "smin z2.s, p0/M, z2.s, z18.s\n"
+    "smin z1.s, p0/M, z1.s, z18.s\n"
+    "trn1 z17.h, z2.h, z1.h\n"
+    "smin z0.s, p0/M, z0.s, z18.s\n"
+    "smin z31.s, p0/M, z31.s, z18.s\n"
+    "trn1 z16.h, z0.h, z31.h\n"
     "trn1 z16.b, z17.b, z16.b\n"
-    "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+    "st1b { z16.b }, p4, [%x[outptr], x28]\n"
     "incb x28\n"
-    "whilelt p3.b, x28, %x[n_channels]\n"
+    "whilelt p4.b, x28, %x[n_channels]\n"
     "b.any 8b\n"
     "14:"  // End
-
     :
     : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
-    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+    : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
   );
 }
 
 }  // namespace pooling
 }  // namespace arm_conv
 
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
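
For the quantised max-pooling kernel above, the requantisation is per-layer: the pooled uint8 value is widened, re-centred by the negated input offset with SADDWB/SADDWT, shifted by the per-layer left shift, scaled with SQRDMULH by the per-layer multiplier, shifted right, offset by the output offset and clamped to [0, 255]. A purely illustrative scalar model of that sequence (not part of this patch; SQRDMULH saturation is again ignored):

#include <algorithm>
#include <cstdint>

// Illustrative scalar sketch of the per-layer requantisation above.
static inline uint8_t requantize_max(uint8_t max_val, int32_t input_offset,
                                     int32_t per_layer_left_shift, int32_t per_layer_mul,
                                     int32_t per_layer_right_shift, int32_t output_offset)
{
  // SADDW with the negated input offset re-centres the pooled value.
  int32_t v = static_cast<int32_t>(max_val) - input_offset;

  // SRSHL by a non-negative amount is a plain left shift.
  v = static_cast<int32_t>(static_cast<int64_t>(v) << per_layer_left_shift);

  // SQRDMULH: rounding doubling multiply, keeping the high 32 bits.
  v = static_cast<int32_t>((2 * static_cast<int64_t>(v) * per_layer_mul + (1LL << 30)) >> 31);

  // SRSHL by a negative amount behaves as a rounding arithmetic shift right.
  const int32_t shift = -per_layer_right_shift;
  if (shift > 0)
  {
    v = (v + (1 << (shift - 1))) >> shift;
  }

  v += output_offset;
  return static_cast<uint8_t>(std::min(255, std::max(0, v)));
}
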
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
index ad95207..556ae2a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,287 +24,259 @@
 
 #pragma once
 
-#include "pool_common.hpp"
+#include "depthfirst_driver.hpp"
+#include "src/core/NEON/kernels/arm_conv/addressing.hpp"
 #include "utils.hpp"
 
-#include "arm_compute/core/Types.h"
+#include <alloca.h>
 #include <limits>
 
 namespace arm_conv {
 namespace pooling {
 
-template <class strategy>
-class PoolingDepthfirst : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type>
+template <typename TInput, typename TOutput>
+class DepthfirstStrategy : public IDepthfirstStrategy
 {
-  using TInput = typename strategy::operand_type;
-  using TOutput = typename strategy::return_type;
+  unsigned int input_rows, input_cols, output_rows, output_cols;
 
-  const PoolingArgs m_args;  // Copy of arguments
-
-  constexpr static unsigned int input_rows(void)
+  public:
+  DepthfirstStrategy(unsigned int window_rows, unsigned int window_cols,
+                     unsigned int stride_rows, unsigned int stride_cols,
+                     unsigned int output_rows, unsigned int output_cols)
+  : input_rows(output_rows + (window_rows - 1) * stride_rows),
+    input_cols(output_cols + (window_cols - 1) * stride_cols),
+    output_rows(output_rows), output_cols(output_cols)
   {
-    return (strategy::out_rows() - 1)*strategy::stride_rows() + strategy::pool_rows();
   }
 
-  constexpr static unsigned int input_cols(void)
-  {
-    return (strategy::out_cols() - 1)*strategy::stride_cols() + strategy::pool_cols();
-  }
+  unsigned int get_input_rows() const override { return input_rows; }
+  unsigned int get_input_cols() const override { return input_cols; }
+  unsigned int get_output_rows() const override { return output_rows; }
+  unsigned int get_output_cols() const override { return output_cols; }
 
+  typedef void (*KernelType)(
+    unsigned int n_channels,
+    const TInput *const *,
+    TOutput *const *,
+    bool exclude_padding,
+    unsigned int pad_left,
+    unsigned int pad_top,
+    unsigned int pad_right,
+    unsigned int pad_bottom
+  );
+  virtual KernelType get_kernel(void) const = 0;
+};
+
+
+struct WorkingSpace
+{
+  void *input_buffer;
+  void *output_buffer;
+};
+
+
+template <typename TInput, typename TOutput=TInput, class OutputStage=Nothing>
+class PoolingDepthfirst : public DepthfirstDriver<TInput, TOutput>
+{
   size_t sizeof_input_buffer(void) const
   {
-    return sizeof(TInput) * m_args.n_channels;
+    return sizeof(TInput) * this->m_args.n_channels;
   }
 
   size_t sizeof_output_buffer(void) const
   {
-    return sizeof(TOutput) * m_args.n_channels;
+    return sizeof(TOutput) * this->m_args.n_channels;
+  }
+
+  protected:
+  /* Compute the amount of working space required for a single thread. */
+  size_t get_working_size_per_thread(unsigned int n_channels) const override
+  {
+    return sizeof(WorkingSpace) + n_channels * (sizeof(TInput) + sizeof(TOutput));
+  }
+
+  /* Initialise the working space for a thread. */
+  void initialise_working_space(void *raw_ws, unsigned int n_channels) const override
+  {
+    auto ws = reinterpret_cast<WorkingSpace *>(raw_ws);
+    ws->input_buffer = ws + 1;
+    ws->output_buffer = reinterpret_cast<TInput *>(ws + 1) + n_channels;
+
+    // Fill the input buffer with an appropriate value
+    TInput fill_val = 0;
+    if (this->m_args.pool_type == PoolingType::MAX)
+    {
+      using limits = std::numeric_limits<TInput>;
+      if (limits::has_infinity)
+      {
+        fill_val = -limits::infinity();
+      }
+      else
+      {
+        fill_val = limits::min();
+      }
+    }
+
+    auto ptr = reinterpret_cast<TInput *>(ws->input_buffer);
+    for (; n_channels; n_channels--)
+    {
+      *(ptr++) = fill_val;
+    }
+  }
+
+  /* Compute a portion of the output tensor with padding. */
+  void compute_tile_padded(
+    unsigned int output_i, unsigned int output_j,
+    unsigned int channel_start, unsigned int channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *working_space
+  ) const override
+  {
+    const auto kern = reinterpret_cast<const DepthfirstStrategy<TInput, TOutput> *>(
+      this->m_strat.get())->get_kernel();
+
+    // Get the working space, and some space on the stack for pointer arrays
+    auto ws = reinterpret_cast<WorkingSpace *>(working_space);
+    auto inptr_array = reinterpret_cast<const TInput **>(alloca(
+        sizeof(TInput *) * this->m_strat->get_input_rows() * this->m_strat->get_input_cols()));
+    auto outptr_array = reinterpret_cast<TOutput **>(alloca(
+        sizeof(TOutput *) * this->m_strat->get_output_rows() * this->m_strat->get_output_cols()));
+
+    // Prepare the input pointers
+    const int ii = static_cast<int>(output_i * this->m_args.pool_stride.rows) - this->m_args.padding.top;
+    const auto input_pad_top = static_cast<unsigned int>(ii < 0 ? -ii : 0);
+    const auto input_i = static_cast<unsigned int>(ii < 0 ? 0 : ii);
+
+    const unsigned int end_ii = ii + this->m_strat->get_input_rows();
+    const auto input_pad_bottom = end_ii < this->m_args.input_rows ? 0 : end_ii - this->m_args.input_rows;
+
+    const int ij = static_cast<int>(output_j * this->m_args.pool_stride.cols) - this->m_args.padding.left;
+    const auto input_pad_left = static_cast<unsigned int>(ij < 0 ? -ij : 0);
+    const auto input_j = static_cast<unsigned int>(ij < 0 ? 0 : ij);
+
+    const unsigned int end_ij = ij + this->m_strat->get_input_cols();
+    const auto input_pad_right = end_ij < this->m_args.input_cols ? 0 : end_ij - this->m_args.input_cols;
+
+    fill_pointer_array<const TInput>(
+      inptr_array, this->m_strat->get_input_rows(), this->m_strat->get_input_cols(),
+      input.base + input_i*input.ld_row + input_j*input.ld_col + channel_start,
+      input.ld_row, input.ld_col,
+      reinterpret_cast<const TInput *>(ws->input_buffer),
+      input_pad_top, this->m_args.input_rows - input_i,
+      input_pad_left, this->m_args.input_cols - input_j
+    );
+
+    // Prepare the output pointers
+    fill_pointer_array(
+      outptr_array, this->m_strat->get_output_rows(), this->m_strat->get_output_cols(),
+      output.base + output_i*output.ld_row + output_j*output.ld_col + channel_start,
+      output.ld_row, output.ld_col,
+      reinterpret_cast<TOutput *>(ws->output_buffer),
+      0, this->m_args.output_rows - output_i, // Top padding, # valid rows
+      0, this->m_args.output_cols - output_j  // Left padding, # valid columns
+    );
+
+    // Call the kernel
+    kern(
+      channel_end - channel_start, inptr_array, outptr_array,
+      this->m_args.exclude_padding,
+      input_pad_left, input_pad_top,
+      input_pad_right, input_pad_bottom
+    );
+  }
+
+  // Compute a portion of the work with only top/bottom padding.
+  void compute_row_padded_tile_row(
+    const unsigned int output_i, unsigned int output_j, unsigned int n_tile_cols,
+    const unsigned int channel_start, const unsigned int channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *working_space
+  ) const override
+  {
+    const auto kern = reinterpret_cast<const DepthfirstStrategy<TInput, TOutput> *>(
+      this->m_strat.get())->get_kernel();
+
+    // Get the working space, and some space on the stack for pointer arrays
+    auto ws = reinterpret_cast<WorkingSpace *>(working_space);
+    auto inptr_array = reinterpret_cast<const TInput **>(alloca(
+        sizeof(TInput *) * this->m_strat->get_input_rows() * this->m_strat->get_input_cols()));
+    auto outptr_array = reinterpret_cast<TOutput **>(alloca(
+        sizeof(TOutput *) * this->m_strat->get_output_rows() * this->m_strat->get_output_cols()));
+
+    // Prepare the initial input pointers
+    const int ii = static_cast<int>(output_i * this->m_args.pool_stride.rows) - this->m_args.padding.top;
+    const auto input_pad_top = static_cast<unsigned int>(ii < 0 ? -ii : 0);
+    const auto input_i = static_cast<unsigned int>(ii < 0 ? 0 : ii);
+
+    const unsigned int end_ii = ii + this->m_strat->get_input_rows();
+    const auto input_pad_bottom = end_ii < this->m_args.input_rows ? 0 : end_ii - this->m_args.input_rows;
+
+    const int ij = static_cast<int>(output_j * this->m_args.pool_stride.cols) - this->m_args.padding.left;
+    const auto input_j = static_cast<unsigned int>(ij < 0 ? 0 : ij);
+
+    const auto end_oi = output_i + this->m_strat->get_output_cols();
+    const auto output_pad_bottom = end_oi < this->m_args.output_rows ? 0 : end_oi - this->m_args.output_rows;
+
+    fill_pointer_array<const TInput>(
+      inptr_array, this->m_strat->get_input_rows(), this->m_strat->get_input_cols(),
+      input.base + input_i*input.ld_row + input_j*input.ld_col + channel_start,
+      input.ld_row, input.ld_col,
+      reinterpret_cast<const TInput *>(ws->input_buffer),
+      input_pad_top, this->m_args.input_rows - input_i,
+      0, this->m_args.input_cols - input_j
+    );
+
+    // Prepare the initial output pointers
+    fill_pointer_array(
+      outptr_array, this->m_strat->get_output_rows(), this->m_strat->get_output_cols(),
+      output.base + output_i*output.ld_row + output_j*output.ld_col + channel_start,
+      output.ld_row, output.ld_col,
+      reinterpret_cast<TOutput *>(ws->output_buffer),
+      0, this->m_args.output_rows - output_i, // Top padding, # valid rows
+      0, this->m_args.output_cols - output_j  // Left padding, # valid columns
+    );
+
+    // Call the kernel
+    for (; n_tile_cols; n_tile_cols--)
+    {
+      kern(
+        channel_end - channel_start, inptr_array, outptr_array,
+        this->m_args.exclude_padding,
+        0, input_pad_top,
+        0, input_pad_bottom
+      );
+
+      // Progress the input and output pointer arrays
+      const auto input_col_stride = input.ld_col * this->m_strat->get_output_cols() * this->m_args.pool_stride.cols;
+      for (
+        auto n = input_pad_top * this->m_strat->get_input_cols();
+        n < (this->m_strat->get_input_rows() - input_pad_bottom) * this->m_strat->get_input_cols();
+        n++
+      )
+      {
+        inptr_array[n] += input_col_stride;
+      }
+
+      const auto output_col_stride = output.ld_col * this->m_strat->get_output_cols();
+      for (
+        auto n = 0u;
+        n < (this->m_strat->get_output_rows() - output_pad_bottom) * this->m_strat->get_output_cols();
+        n++
+      )
+      {
+        outptr_array[n] += output_col_stride;
+      }
+    }
   }
 
   public:
-  PoolingDepthfirst(const PoolingArgs &args) : m_args(args)
+  PoolingDepthfirst(const DepthfirstStrategy<TInput, TOutput> *strat,
+                    const PoolingArgs &args, const OutputStage &os = {})
+  : DepthfirstDriver<TInput, TOutput>(strat, args)
   {
-  }
-
-  PoolingDepthfirst(PoolingDepthfirst &) = delete;
-  PoolingDepthfirst &operator=(PoolingDepthfirst &) = delete;
-
-  size_t get_working_size(unsigned int num_threads) const override
-  {
-    // We require a channel-length vector of input padding values
-    // (to be shared amongst all threads) and (for each thread) a
-    // channel-length vector in which to dump surplus output.
-    return sizeof_input_buffer() + num_threads * sizeof_output_buffer();
-  }
-
-  void execute(
-    const void *const input,
-    void *const output,
-    void *const working_space,
-    unsigned int thread_id,
-    unsigned int num_threads
-  ) const override
-  {
-    const size_t ld_input_col = m_args.n_channels;
-    const size_t ld_input_row = ld_input_col * m_args.input_cols;
-    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
-    const size_t ld_output_col = ld_input_col;
-    const size_t ld_output_row = ld_output_col * m_args.output_cols;
-    const size_t ld_output_batch = ld_output_row * m_args.output_rows;
-
-    execute(
-      input, ld_input_col, ld_input_row, ld_input_batch,
-      output, ld_output_col, ld_output_row, ld_output_batch,
-      working_space,
-      thread_id, num_threads
-    );
-  }
-
-  void execute(
-    const void *const input,
-    size_t ld_input_col,
-    size_t ld_input_row,
-    size_t ld_input_batch,
-    void *const output,
-    size_t ld_output_col,
-    size_t ld_output_row,
-    size_t ld_output_batch,
-    void *const working_space,
-    unsigned int thread_id,
-    unsigned int num_threads
-  ) const override
-  {
-    execute(
-      m_args.n_batches, m_args.input_rows, m_args.input_cols,
-      m_args.n_channels,
-      input, ld_input_col, ld_input_row, ld_input_batch,
-      m_args.padding,
-      m_args.output_rows, m_args.output_cols,
-      output, ld_output_col, ld_output_row, ld_output_batch,
-      working_space,
-      thread_id, num_threads
-    );
-  }
-
-  void execute(
-    unsigned int batches,
-    unsigned int height,
-    unsigned int width,
-    unsigned int channels,
-    const void *const _input,
-    size_t ld_input_col,
-    size_t ld_input_row,
-    size_t ld_input_batch,
-    const PaddingValues &padding,
-    unsigned int output_height,
-    unsigned int output_width,
-    void *const _output,
-    size_t ld_output_col,
-    size_t ld_output_row,
-    size_t ld_output_batch,
-    void *const _working_space,
-    unsigned int thread_id,
-    unsigned int num_threads
-  ) const override
-  {
-    ARM_COMPUTE_UNUSED(batches, ld_input_batch, ld_output_batch);
-    strategy strat(m_args.cpu_info);
-#ifdef CYCLE_PROFILING
-    arm_gemm::profiler prof;
-#endif // CYCLE_PROFILING
-
-    // Cast input and output pointers into the right types
-    const TInput *const inptr = static_cast<const TInput *>(_input);
-    TOutput *const outptr = static_cast<TOutput *>(_output);
-
-    const unsigned int roundup_output_rows = roundup(output_height, num_threads);
-    const unsigned int rows_per_thread = roundup_output_rows / num_threads;
-    const int start_out_height = static_cast<int>(thread_id * rows_per_thread);
-    const int end_out_height = std::min<int>(output_height, static_cast<int>((thread_id + 1) * rows_per_thread));
-
-    // Create an array for the input pointers
-    const TInput * _inptr_array[input_rows() * input_cols()];
-    const TInput **const inptr_array = _inptr_array;
-
-    // Create an array for the output pointers
-    TOutput * _outptr_array[strategy::out_rows() * strategy::out_cols()];
-    TOutput **const outptr_array = _outptr_array;
-
-    // Allocate portions of the working space
-    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
-    TOutput *const output_buffer = reinterpret_cast<TOutput *>(working_space + thread_id * sizeof_output_buffer());
-    TInput *const input_buffer = reinterpret_cast<TInput *>(working_space + num_threads * sizeof_output_buffer());
-
-    // Initialise the input buffer
-    for (unsigned int c = 0; c < channels; c++)
-    {
-      TInput &val = input_buffer[c];
-
-      if (strategy::pooling_type() == PoolingType::AVERAGE)
-      {
-        val = static_cast<TInput>(0);
-      }
-      else if (strategy::pooling_type() == PoolingType::MAX)
-      {
-#if defined(__aarch64__)
-        using InputType = typename std::conditional<std::is_same<TInput, __fp16>::value, arm_compute::half, TInput>::type;
-        using limits = std::numeric_limits<InputType>;
-#else // defined(__aarch64__)
-        using limits = std::numeric_limits<TInput>;
-#endif // defined(__aarch64__)
-        if (limits::has_infinity)
-        {
-          val = -limits::infinity();
-        }
-        else
-        {
-          val = limits::min();
-        }
-      }
-    }
-
-    // For each output tile, construct the requisite set of pointers and call
-    // into the kernel.
-    for (unsigned int batch = 0; batch < batches; batch++)
-    {
-      // Get batch pointers
-      const auto inptr_batch = inptr + batch * ld_input_batch;
-      const auto outptr_batch = outptr + batch * ld_output_batch;
-
-      for (int start_out_i = start_out_height;
-           start_out_i < end_out_height;
-           start_out_i += static_cast<int>(strategy::out_rows()))
-      {
-        const int end_out_i = start_out_i + strategy::out_rows();
-        const int start_in_i = start_out_i * strategy::stride_rows() - padding.top;
-        const int end_in_i = start_in_i + input_rows();
-
-        // Compute top/bottom padding - TODO Is this right for average pooling?
-        const auto pad_top = static_cast<unsigned int>(-std::min(start_in_i, 0));
-        const auto pad_bottom = static_cast<unsigned int>(-std::min(static_cast<int>(height) - end_in_i, 0));
-        const unsigned int valid_output_rows = std::min(
-          end_out_i - start_out_i,
-          static_cast<int>(end_out_height) - start_out_i
-        );
-
-        // Fill the input pointer array with padding values
-        for (auto index = 0u; index < input_rows() * input_cols(); index++)
-        {
-          inptr_array[index] = input_buffer;
-        }
-
-        for (int start_out_j = 0, start_in_j = -padding.left;
-             start_out_j < static_cast<int>(output_width);
-             start_out_j += static_cast<int>(strategy::out_cols()),
-             start_in_j += static_cast<int>(strategy::out_cols()) * strategy::stride_cols())
-        {
-          const int end_out_j = start_out_j + strategy::out_cols();
-          const int end_in_j = start_in_j + input_cols();
-
-          // Compute left/right padding - TODO Is this right for average pooling?
-          const auto pad_left = static_cast<unsigned int>(-std::min(start_in_j, 0));
-          const auto pad_right = static_cast<unsigned int>(-std::min(static_cast<int>(width) - end_in_j, 0));
-
-          const unsigned int valid_output_cols = std::min(
-            end_out_j - start_out_j,
-            static_cast<int>(output_width) - start_out_j
-          );
-
-          // Construct the input pointer array - fill the array with pointers to
-          // the input buffer and then fill in the required values.
-          for (auto i = pad_top; i < input_rows() - pad_bottom; i++)
-          {
-            // Can skip over the left padding because we will have either the
-            // same or less than the previous tile.
-            unsigned int j = pad_left;
-            const TInput *colptr = inptr_batch + (start_in_i + i) * ld_input_row + (start_in_j + j) * ld_input_col;
-            const TInput **ptrs = inptr_array + i * input_cols() + j;
-            for (; j < input_cols() - pad_right; j++)
-            {
-              *(ptrs++) = colptr;
-              colptr += ld_input_col;
-            }
-            for (; j < input_cols(); j++)
-            {
-              *(ptrs++) = input_buffer;
-            }
-          }
-
-          // Construct the output pointer array.
-          TOutput **outptr_pos = outptr_array;
-          for (auto i = 0u; i < valid_output_rows; i++)
-          {
-            unsigned int j = 0u;
-            TOutput *colptr = outptr_batch + (start_out_i + i) * ld_output_row + start_out_j * ld_output_col;
-            for (; j < valid_output_cols; j++)
-            {
-              *(outptr_pos++) = colptr;
-               colptr += ld_output_col;
-            }
-            for (; j < strategy::out_cols(); j++)
-            {
-              *(outptr_pos++) = output_buffer;
-            }
-          }
-          for (auto i = valid_output_rows; i < strategy::out_rows(); i++)
-          {
-            for (auto j = 0u; j < strategy::out_cols(); j++)
-            {
-              *(outptr_pos++) = output_buffer;
-            }
-          }
-
-#ifdef CYCLE_PROFILING
-          // TODO Work number
-          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(strategy::out_rows() * strategy::out_cols() * strategy::pool_rows() * strategy::pool_cols()));
-#endif
-          strat.kernel(
-            channels, inptr_array, outptr_array,
-            m_args.exclude_padding, pad_left, pad_top, pad_right, pad_bottom
-          );
-        }
-      }
-    }
+    ARM_COMPUTE_UNUSED(os);
   }
 };
 
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
index 5979862..227d808 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,236 +24,264 @@
 
 #pragma once
 
-#include "pool_common.hpp"
+#include "arm_compute/core/Error.h"
+#include "depthfirst_driver.hpp"
 #include "utils.hpp"
+#include <alloca.h>
 
 namespace arm_conv {
 namespace pooling {
 
-template <class strategy>
-class PoolingDepthfirstGeneric : public PoolingCommon<typename strategy::operand_type, typename strategy::return_type>
+template <typename TInput, typename TOutput, typename OutputStage = Nothing>
+class IGenericDepthfirstStrategy;
+
+template <typename TInput, typename TOutput>
+class IGenericDepthfirstStrategy<TInput, TOutput, Nothing>
 {
-  using TInput = typename strategy::operand_type;
-  using TOutput = typename strategy::return_type;
+  public:
+  virtual ~IGenericDepthfirstStrategy() = default;
 
-  const PoolingArgs m_args;  // Copy of arguments
+  typedef void (*KernelType)(
+    uint64_t window_cells,
+    uint64_t n_valid_cells,
+    uint64_t n_channels,
+    const TInput *const *,
+    TOutput *
+  );
 
-  unsigned int input_rows(void) const
+  virtual KernelType get_kernel(void) const = 0;
+};
+
+template <typename TInput, typename TOutput>
+class IGenericDepthfirstStrategy<TInput, TOutput, Requantize32>
+{
+  public:
+  virtual ~IGenericDepthfirstStrategy() = default;
+
+  typedef void (*KernelType)(
+    uint64_t window_cells,
+    uint64_t n_valid_cells,
+    uint64_t n_channels,
+    const TInput *const *,
+    TOutput *,
+    const Requantize32 &
+  );
+
+  virtual KernelType get_kernel(void) const = 0;
+};
+
+template <typename TInput, typename TOutput, typename OutputStage>
+struct Invoker;
+
+template <typename TInput, typename TOutput>
+struct Invoker<TInput, TOutput, Nothing>
+{
+  static inline void invoke(
+    const typename IGenericDepthfirstStrategy<TInput, TOutput, Nothing>::KernelType kern,
+    uint64_t window_cells,
+    uint64_t n_valid_cells,
+    uint64_t n_channels,
+    const TInput *const *inptrs,
+    TOutput *outptr,
+    const Nothing &
+  )
   {
-    return m_args.pool_window.rows;
+    kern(window_cells, n_valid_cells, n_channels, inptrs, outptr);
+  }
+};
+
+template <typename TInput, typename TOutput>
+struct Invoker<TInput, TOutput, Requantize32>
+{
+  static inline void invoke(
+    const typename IGenericDepthfirstStrategy<TInput, TOutput, Requantize32>::KernelType kern,
+    uint64_t window_cells,
+    uint64_t n_valid_cells,
+    uint64_t n_channels,
+    const TInput *const *inptrs,
+    TOutput *outptr,
+    const Requantize32 &qp
+  )
+  {
+    kern(window_cells, n_valid_cells, n_channels, inptrs, outptr, qp);
+  }
+};
+
+template <typename TInput, typename TOutput, typename OutputStage>
+class GenericDepthfirstWrapper : public IDepthfirstStrategy
+{
+  using StratType = IGenericDepthfirstStrategy<TInput, TOutput, OutputStage>;
+
+  std::unique_ptr<const StratType> m_strat;
+  const unsigned int window_rows, window_cols;
+
+  public:
+  GenericDepthfirstWrapper(const StratType *strat, const PoolingArgs &args)
+  : m_strat(strat), window_rows(args.pool_window.rows), window_cols(args.pool_window.cols)
+  {
   }
 
-  unsigned int input_cols(void) const
+  unsigned int get_input_rows(void) const override { return window_rows; }
+  unsigned int get_input_cols(void) const override { return window_cols; }
+  unsigned int get_output_rows(void) const override { return 1; }
+  unsigned int get_output_cols(void) const override { return 1; }
+
+  typename StratType::KernelType get_kernel(void) const { return m_strat->get_kernel(); }
+};
+
+template <typename TInput, typename TOutput=TInput, typename OutputStage=Nothing>
+class PoolingDepthfirstGeneric : public DepthfirstDriver<TInput, TOutput>
+{
+  const OutputStage m_os;
+
+  protected:
+  size_t get_working_size_per_thread(unsigned int) const override { return 0; }
+  void initialise_working_space(void *, unsigned int) const override { /* Nothing */ }
+
+  /* Compute a portion of the output tensor with padding. */
+  void compute_tile_padded(
+    unsigned int output_i, unsigned int output_j,
+    unsigned int channel_start, unsigned int channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *
+  ) const override
   {
-    return m_args.pool_window.cols;
+    // Determine start position and padding
+    const int start_i = static_cast<int>(output_i * this->m_args.pool_stride.rows) - this->m_args.padding.top;
+    const auto input_i = static_cast<unsigned int>(start_i < 0 ? 0 : start_i);
+    const auto pad_top = static_cast<unsigned int>(start_i < 0 ? -start_i : 0);
+    const int end_i = start_i + this->m_args.pool_window.rows;
+    const auto pad_bottom = static_cast<unsigned int>((unsigned int) end_i < this->m_args.input_rows ? 0 : end_i - this->m_args.input_rows);
+    const auto valid_rows = this->m_args.pool_window.rows - (pad_top + pad_bottom);
+
+    const int start_j = static_cast<int>(output_j * this->m_args.pool_stride.cols) - this->m_args.padding.left;
+    const auto input_j = static_cast<unsigned int>(start_j < 0 ? 0 : start_j);
+    const auto pad_left = static_cast<unsigned int>(start_j < 0 ? -start_j : 0);
+    const int end_j = start_j + this->m_args.pool_window.cols;
+    const auto pad_right = static_cast<unsigned int>((unsigned int) end_j < this->m_args.input_cols ? 0 : end_j - this->m_args.input_cols);
+    const auto valid_cols = this->m_args.pool_window.cols - (pad_left + pad_right);
+
+    // Determine the number of valid cells and prepare the pointers
+    const auto n_valid_cells = valid_rows * valid_cols;
+    auto inptrs = reinterpret_cast<const TInput **>(alloca(n_valid_cells * sizeof(TInput *)));
+    {
+      auto my_ptr = inptrs;
+      auto row_ptr = input.base + input_i*input.ld_row + input_j*input.ld_col + channel_start;
+      for (auto i = valid_rows; i; i--)
+      {
+        auto ptr = row_ptr;
+        row_ptr += input.ld_row;
+
+        for (auto j = valid_cols; j; j--)
+        {
+          *(my_ptr++) = ptr;
+          ptr += input.ld_col;
+        }
+      }
+    }
+
+    auto outptr = output.base + output_i*output.ld_row + output_j*output.ld_col + channel_start;
+
+    // Some padding variants include (or exclude) the padding values; we handle
+    // this by computing the extent of the padded input tensor and hence the
+    // total number of cells captured in the pooling window.
+    const auto bottom_padded_height = this->m_args.input_rows + this->m_args.padding.bottom;
+    const auto captured_rows = std::min<int>(end_i, bottom_padded_height) - start_i;
+    const auto right_padded_width = this->m_args.input_cols + this->m_args.padding.right;
+    const auto captured_cols = std::min<int>(end_j, right_padded_width) - start_j;
+    const auto captured_cells = captured_rows * captured_cols;
+    const auto window_cells = this->m_args.exclude_padding ? n_valid_cells : captured_cells;
+
+    // Execute the kernel
+    Invoker<TInput, TOutput, OutputStage>::invoke(
+      reinterpret_cast<const GenericDepthfirstWrapper<TInput, TOutput, OutputStage> *>(this->m_strat.get())->get_kernel(),
+      window_cells, n_valid_cells, channel_end - channel_start, inptrs, outptr, m_os
+    );
+  }
+
+  // Compute a portion of the work with only top/bottom padding.
+  void compute_row_padded_tile_row(
+    const unsigned int output_i, unsigned int output_j, unsigned int n_tile_cols,
+    const unsigned int channel_start, const unsigned int channel_end,
+    const TensorSpec<const TInput *> &input,
+    const TensorSpec<TOutput *> &output,
+    void *working_space
+  ) const override
+  {
+    ARM_COMPUTE_UNUSED(working_space);
+    // Determine start position and padding
+    const int start_i = static_cast<int>(output_i * this->m_args.pool_stride.rows) - this->m_args.padding.top;
+    const auto input_i = static_cast<unsigned int>(start_i < 0 ? 0 : start_i);
+    const auto pad_top = static_cast<unsigned int>(start_i < 0 ? -start_i : 0);
+    const int end_i = start_i + this->m_args.pool_window.rows;
+    const auto pad_bottom = static_cast<unsigned int>((unsigned int) end_i < this->m_args.input_rows ? 0 : end_i - this->m_args.input_rows);
+    const auto valid_rows = this->m_args.pool_window.rows - (pad_top + pad_bottom);
+
+    const int start_j = static_cast<int>(output_j * this->m_args.pool_stride.cols) - this->m_args.padding.left;
+    const auto input_j = static_cast<unsigned int>(start_j < 0 ? 0 : start_j);
+    const auto valid_cols = this->m_args.pool_window.cols;
+
+    // Determine the number of valid cells and prepare the pointers
+    const auto n_valid_cells = valid_rows * valid_cols;
+    auto inptrs = reinterpret_cast<const TInput **>(alloca(n_valid_cells * sizeof(TInput *)));
+    {
+      auto my_ptr = inptrs;
+      auto row_ptr = input.base + input_i*input.ld_row + input_j*input.ld_col + channel_start;
+      for (auto i = valid_rows; i; i--)
+      {
+        auto ptr = row_ptr;
+        row_ptr += input.ld_row;
+
+        for (auto j = valid_cols; j; j--)
+        {
+          *(my_ptr++) = ptr;
+          ptr += input.ld_col;
+        }
+      }
+    }
+
+    auto outptr = output.base + output_i*output.ld_row + output_j*output.ld_col + channel_start;
+
+    // Some padding variants include (or exclude) the padding values; we handle
+    // this by computing the extent of the padded input tensor and hence the
+    // total number of cells captured in the pooling window.
+    const auto bottom_padded_height = this->m_args.input_rows + this->m_args.padding.bottom;
+    const auto captured_rows = std::min<int>(end_i, bottom_padded_height) - start_i;
+    const auto captured_cells = captured_rows * valid_cols;
+    const auto window_cells = this->m_args.exclude_padding ? n_valid_cells : captured_cells;
+
+    for (; n_tile_cols; n_tile_cols--)
+    {
+      // Execute the kernel
+      Invoker<TInput, TOutput, OutputStage>::invoke(
+        reinterpret_cast<const GenericDepthfirstWrapper<TInput, TOutput, OutputStage> *>(this->m_strat.get())->get_kernel(),
+        window_cells, n_valid_cells, channel_end - channel_start, inptrs, outptr, m_os
+      );
+
+      // Update the pointers; the output pointer advances by one column and the
+      // input pointers advance by the pooling column stride.
+      outptr += output.ld_col;
+      for (auto n = 0u; n < n_valid_cells; n++)
+      {
+        inptrs[n] += this->m_args.pool_stride.cols * input.ld_col;
+      }
+    }
   }
 
   public:
-  PoolingDepthfirstGeneric(const PoolingArgs &args) : m_args(args)
+  PoolingDepthfirstGeneric(
+    const IGenericDepthfirstStrategy<TInput, TOutput, OutputStage> *strat,
+    const PoolingArgs &args,
+    const OutputStage &os = {}
+  )
+  : DepthfirstDriver<TInput, TOutput>(
+      new GenericDepthfirstWrapper<TInput, TOutput, OutputStage>(strat, args),
+      args
+    ),
+    m_os(os)
   {
   }
-
-  PoolingDepthfirstGeneric(PoolingDepthfirstGeneric &) = delete;
-  PoolingDepthfirstGeneric &operator=(PoolingDepthfirstGeneric &) = delete;
-
-  size_t sizeof_input_pointer_array(void) const
-  {
-    return sizeof(TInput *) * input_rows() * input_cols();
-  }
-
-  size_t get_working_size(unsigned int num_threads) const override
-  {
-    return num_threads * sizeof_input_pointer_array();
-  }
-
-  void execute(
-    const void *const input,
-    void *const output,
-    void *const working_space,
-    unsigned int thread_id,
-    unsigned int num_threads
-  ) const override
-  {
-    const size_t ld_input_col = m_args.n_channels;
-    const size_t ld_input_row = ld_input_col * m_args.input_cols;
-    const size_t ld_input_batch = ld_input_row * m_args.input_rows;
-    const size_t ld_output_col = ld_input_col;
-    const size_t ld_output_row = ld_output_col * m_args.output_cols;
-    const size_t ld_output_batch = ld_output_row * m_args.output_rows;
-
-    execute(
-      input, ld_input_col, ld_input_row, ld_input_batch,
-      output, ld_output_col, ld_output_row, ld_output_batch,
-      working_space,
-      thread_id, num_threads
-    );
-  }
-
-  void execute(
-    const void *const input,
-    size_t ld_input_col,
-    size_t ld_input_row,
-    size_t ld_input_batch,
-    void *const output,
-    size_t ld_output_col,
-    size_t ld_output_row,
-    size_t ld_output_batch,
-    void *const working_space,
-    unsigned int thread_id,
-    unsigned int num_threads
-  ) const override
-  {
-    execute(
-      m_args.n_batches, m_args.input_rows, m_args.input_cols,
-      m_args.n_channels,
-      input, ld_input_col, ld_input_row, ld_input_batch,
-      m_args.padding,
-      m_args.output_rows, m_args.output_cols,
-      output, ld_output_col, ld_output_row, ld_output_batch,
-      working_space,
-      thread_id, num_threads
-    );
-  }
-
-  void execute(
-    unsigned int batches,
-    unsigned int height,
-    unsigned int width,
-    unsigned int channels,
-    const void *const _input,
-    size_t ld_input_col,
-    size_t ld_input_row,
-    size_t ld_input_batch,
-    const PaddingValues &padding,
-    unsigned int output_height,
-    unsigned int output_width,
-    void *const _output,
-    size_t ld_output_col,
-    size_t ld_output_row,
-    size_t ld_output_batch,
-    void *const _working_space,
-    unsigned int thread_id,
-    unsigned int num_threads
-  ) const override
-  {
-    strategy strat(m_args.cpu_info);
-#ifdef CYCLE_PROFILING
-    arm_gemm::profiler prof;
-#endif // CYCLE_PROFILING
-
-    const unsigned int roundup_output_rows = roundup(output_height, num_threads);
-    const unsigned int rows_per_thread = roundup_output_rows / num_threads;
-    int start_out_height = static_cast<int>(thread_id * rows_per_thread);
-    int end_out_height = std::min<int>(output_height, static_cast<int>((thread_id + 1) * rows_per_thread));
-
-    unsigned int start_channel = 0;
-    unsigned int end_channel = channels;
-    if(output_height == 1)
-    {
-      const unsigned int channels_per_thread = roundup(channels, num_threads) / num_threads;
-      start_channel = thread_id * channels_per_thread;
-      end_channel = std::min(start_channel + channels_per_thread, channels);
-
-      // Reset start and end rows
-      start_out_height = 0;
-      end_out_height = output_height;
-    }
-
-    if(start_channel >= end_channel)
-    {
-        // Early exit in case of multiple threads parallelising on channels
-        return;
-    }
-
-    // Cast input and output pointers into the right types
-    const TInput *const inptr = static_cast<const TInput *>(_input) + start_channel;
-    TOutput *const outptr = static_cast<TOutput *>(_output) + start_channel;
-
-    // Grab the input pointer array
-    uint8_t *const working_space = static_cast<uint8_t *>(_working_space);
-    const TInput **const inptr_array = reinterpret_cast<const TInput **>(working_space + thread_id * sizeof_input_pointer_array());
-
-    // For each output tile, construct the requisite set of pointers and call
-    // into the kernel.
-    for (unsigned int batch = 0; batch < batches; batch++)
-    {
-      // Get batch pointers
-      const auto inptr_batch = inptr + batch * ld_input_batch;
-      auto outptr_row = outptr + batch * ld_output_batch + start_out_height * ld_output_row;
-
-      for (int out_i = start_out_height; out_i < end_out_height; out_i++)
-      {
-        const int start_in_i = out_i * m_args.pool_stride.rows - padding.top;
-        const int end_in_i = start_in_i + m_args.pool_window.rows;
-
-        // Compute top/bottom padding
-        const auto pad_top = static_cast<unsigned int>(std::max(0 - start_in_i, 0));
-        const auto pad_bottom = static_cast<unsigned int>(std::max<int>(end_in_i - height, 0));
-        const auto valid_rows = input_rows() - pad_top - pad_bottom;
-
-        // Compute the number of pooling window rows which are contained in
-        // either the valid region of the input tensor, or the padding.
-        const auto padded_bottom = std::min<unsigned int>(
-          start_in_i + m_args.pool_window.rows, height + padding.bottom
-        );
-        const auto n_total_rows = padded_bottom - start_in_i;
-
-        auto outptr_col = outptr_row;
-        auto inptr_row = inptr_batch + (start_in_i + pad_top) * ld_input_row;
-
-        for (int out_j = 0, start_in_j = -padding.left;
-             out_j < static_cast<int>(output_width);
-             out_j++, start_in_j += m_args.pool_stride.cols)
-        {
-          const int end_in_j = start_in_j + m_args.pool_window.cols;
-
-          // Compute left/right padding
-          const auto pad_left = static_cast<unsigned int>(std::max(0 - start_in_j, 0));
-          const auto pad_right = static_cast<unsigned int>(std::max<int>(0, end_in_j - width));
-          const auto valid_cols = input_cols() - pad_left - pad_right;
-
-          // Compute the number of pooling window columns which are contained
-          // in either the valid region of the input tensor, or the padding.
-          const auto padded_right = std::min<unsigned int>(
-            start_in_j + m_args.pool_window.cols, width + padding.right
-          );
-          const auto n_total_cols = padded_right - start_in_j;
-
-          // Construct the input pointer array - fill in all valid points
-          // contiguously.
-          const TInput **ptrs = inptr_array;
-          const TInput *rowptr = inptr_row + (start_in_j + pad_left) * ld_input_col;
-          for (auto i = 0u; i < valid_rows; i++)
-          {
-            const TInput *colptr = rowptr;
-            for (auto j = 0u; j < valid_cols; j++)
-            {
-              *(ptrs++) = colptr;
-              colptr += ld_input_col;
-            }
-            rowptr += ld_input_row;
-          }
-
-          // Compute the number of valid cells
-          const auto valid_cells = valid_rows * valid_cols;
-          const auto cells_in_range = n_total_rows * n_total_cols;
-          const auto window_cells = m_args.exclude_padding ? valid_cells : cells_in_range;
-
-          // Get the output pointer for this call
-          TOutput *outptr = outptr_col;
-          outptr_col += ld_output_col;
-
-#ifdef CYCLE_PROFILING
-          // TODO Work number
-          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(strategy::out_rows() * strategy::out_cols() * strategy::pool_rows() * strategy::pool_cols()));
-#endif // CYCLE_PROFILING
-          strat.kernel(window_cells, valid_cells, end_channel - start_channel, inptr_array, outptr);
-        }
-
-        outptr_row += ld_output_row;
-      }
-    }
-  }
 };
 
 }  // namespace pooling
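
The window_cells / n_valid_cells split computed in compute_tile_padded above selects the
divisor an average-pooling kernel uses when padding is included or excluded. A standalone
sketch of the row-axis arithmetic, under the assumption of a 3x3 window, stride 1, five
input rows and one row of padding top and bottom (illustrative only, not part of the patch):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
      // Assumed example: 3x3 window, stride 1, 5 input rows, 1 row of padding top and bottom.
      const int pool_rows = 3, stride_rows = 1, input_rows = 5;
      const int padding_top = 1, padding_bottom = 1;

      for (int output_i = 0; output_i < 5; output_i++)
      {
        const int start_i = output_i * stride_rows - padding_top;
        const int end_i = start_i + pool_rows;
        const int pad_top = std::max(-start_i, 0);
        const int pad_bottom = std::max(end_i - input_rows, 0);

        // Window rows backed by real input data.
        const int valid_rows = pool_rows - (pad_top + pad_bottom);
        // Window rows falling inside the padded extent of the input.
        const int captured_rows = std::min(end_i, input_rows + padding_bottom) - start_i;

        // exclude_padding selects the valid count (divide by data cells only);
        // otherwise the captured count (divide by every cell the window covers).
        std::printf("output row %d: valid_rows=%d captured_rows=%d\n",
                    output_i, valid_rows, captured_rows);
      }
      return 0;
    }
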
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
index 42f23a1..e3ce652 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp16.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -48,19 +48,6 @@
 namespace arm_conv {
 namespace pooling {
 
-namespace
-{
-  template <class Strategy>
-  bool is_supported(const PoolingArgs &args, const Nothing &)
-  {
-    return ((args.pool_type == Strategy::pooling_type()) &&
-            (args.pool_window.rows == Strategy::pool_rows()) &&
-            (args.pool_window.cols == Strategy::pool_cols()) &&
-            (args.pool_stride.rows == Strategy::stride_rows()) &&
-            (args.pool_stride.cols == Strategy::stride_cols()));
-  }
-}
-
 static const PoolingImplementation<__fp16, __fp16> pooling_fp16_methods[] = {
   {
     PoolingMethod::DEPTHFIRST,
@@ -70,7 +57,8 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<__fp16>>(args);
+      auto strat = new cpp_nhwc_1x1_stride_any_depthfirst<__fp16>(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
     },
   },
 #if defined(__aarch64__)
@@ -78,41 +66,51 @@
   {
     PoolingMethod::DEPTHFIRST,
     "sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_sve() && is_supported<sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args, unused);
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sve() &&
+             is_supported<sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirst<sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<__fp16>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_sve() && is_supported<sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, unused);
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sve() &&
+             is_supported<sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, os);
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirst<sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+      auto strat = new sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<__fp16>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_fp16_nhwc_avg_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_sve() && args.pool_type == PoolingType::AVERAGE; },
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sve() && args.pool_type == PoolingType::AVERAGE;
+    },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirstGeneric<sve_fp16_nhwc_avg_generic_depthfirst>(args);
+      auto strat = new sve_fp16_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_fp16_nhwc_max_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_sve() && args.pool_type == PoolingType::MAX; },
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sve() && args.pool_type == PoolingType::MAX;
+    },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirstGeneric<sve_fp16_nhwc_max_generic_depthfirst>(args);
+      auto strat = new sve_fp16_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
     },
   },
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
@@ -120,41 +118,41 @@
   {
     PoolingMethod::DEPTHFIRST,
     "a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_fp16() && is_supported<a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args, unused);
-    },
+    is_supported<a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>,
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirst<a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<__fp16>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_fp16() && is_supported<a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, unused);
-    },
+    is_supported<a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>,
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirst<a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+      auto strat = new a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<__fp16>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "a64_fp16_nhwc_avg_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_fp16() && args.pool_type == PoolingType::AVERAGE; },
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirstGeneric<a64_fp16_nhwc_avg_generic_depthfirst>(args);
+      auto strat = new a64_fp16_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "a64_fp16_nhwc_max_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_fp16() && args.pool_type == PoolingType::MAX; },
+    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<__fp16, __fp16> * {
-      return new PoolingDepthfirstGeneric<a64_fp16_nhwc_max_generic_depthfirst>(args);
+      auto strat = new a64_fp16_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<__fp16>(strat, args);
     },
   },
 #endif  // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
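
For reference, a hedged sketch of how the table above is consumed: pooling<__fp16, __fp16>()
walks the entries, takes the first whose predicate accepts the arguments, and returns the
constructed driver. The helper below, its include path and the single-threaded invocation
are assumptions for illustration; __fp16 presumes an AArch64 build with fp16 support.

    #include <cstdint>
    #include <vector>
    #include "pooling.hpp"  // assumed include path to the arm_conv::pooling declarations

    using namespace arm_conv::pooling;

    // Illustrative helper: select and run an fp16 pooling operation single-threaded.
    void run_fp16_pool(const PoolingArgs &args, const __fp16 *src, __fp16 *dst)
    {
      auto pool = pooling<__fp16, __fp16>(args, Nothing{});
      if (pool == nullptr)
      {
        return;  // no entry in pooling_fp16_methods accepted these arguments
      }

      std::vector<uint8_t> scratch(pool->get_working_size(1 /* n_threads */));

      // The three-pointer execute() overload derives leading dimensions from 'args'.
      pool->execute(src, dst, scratch.data(), 0 /* thread_id */, 1 /* n_threads */);
    }
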
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
index 1905e1e..5ee0884 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,19 +45,6 @@
 namespace arm_conv {
 namespace pooling {
 
-namespace
-{
-  template <class Strategy>
-  bool is_supported(const PoolingArgs &args, const Nothing &)
-  {
-    return ((args.pool_type == Strategy::pooling_type()) &&
-            (args.pool_window.rows == Strategy::pool_rows()) &&
-            (args.pool_window.cols == Strategy::pool_cols()) &&
-            (args.pool_stride.rows == Strategy::stride_rows()) &&
-            (args.pool_stride.cols == Strategy::stride_cols()));
-  }
-}
-
 static const PoolingImplementation<float, float> pooling_fp32_methods[] = {
   {
     PoolingMethod::DEPTHFIRST,
@@ -67,7 +54,8 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<float>>(args);
+      auto strat = new cpp_nhwc_1x1_stride_any_depthfirst<float>(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float, float, Nothing>(strat, args);
     },
   },
 #if defined(__aarch64__)
@@ -75,23 +63,27 @@
   {
     PoolingMethod::DEPTHFIRST,
     "sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_sve() && is_supported<sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args, unused);
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sve() &&
+             is_supported<sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirst<sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<float>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_sve() && is_supported<sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, unused);
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sve() &&
+             is_supported<sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args, os);
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirst<sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+      auto strat = new sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<float>(strat, args);
     },
   },
   {
@@ -102,7 +94,8 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirstGeneric<sve_fp32_nhwc_avg_generic_depthfirst>(args);
+      auto strat = new sve_fp32_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float>(strat, args);
     },
   },
   {
@@ -113,7 +106,8 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirstGeneric<sve_fp32_nhwc_max_generic_depthfirst>(args);
+      auto strat = new sve_fp32_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float>(strat, args);
     },
   },
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
@@ -123,7 +117,8 @@
     is_supported<a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>,
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirst<a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<float>(strat, args);
     },
   },
   {
@@ -132,7 +127,8 @@
     is_supported<a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>,
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirst<a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst>(args);
+      auto strat = new a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<float>(strat, args);
     },
   },
   {
@@ -141,7 +137,8 @@
     [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirstGeneric<a64_fp32_nhwc_avg_generic_depthfirst>(args);
+      auto strat = new a64_fp32_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float>(strat, args);
     },
   },
   {
@@ -150,7 +147,8 @@
     [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<float, float> * {
-      return new PoolingDepthfirstGeneric<a64_fp32_nhwc_max_generic_depthfirst>(args);
+      auto strat = new a64_fp32_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<float>(strat, args);
     },
   },
 #endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp
index 3d968b8..78320ce 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_implementation.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -39,7 +39,7 @@
   const char * name;
   std::function<bool(const PoolingArgs &, const OutputStage &)> is_supported;
   std::function<uint64_t(const PoolingArgs &, const OutputStage &)> cycle_estimate;
-  std::function<PoolingCommon<TInput, TOutput, OutputStage> *(const PoolingArgs &, const OutputStage &)> initialise;
+  std::function<PoolingCommon<TInput, TOutput> *(const PoolingArgs &, const OutputStage &)> initialise;
 
   bool get_is_supported(const PoolingArgs &args, const OutputStage &os) const
   {
@@ -51,7 +51,7 @@
     return (cycle_estimate == nullptr) ? 0 : cycle_estimate(args, os);
   }
 
-  PoolingCommon<TInput, TOutput, OutputStage> *get_instance(const PoolingArgs &args, const OutputStage &os) const
+  PoolingCommon<TInput, TOutput> *get_instance(const PoolingArgs &args, const OutputStage &os) const
   {
     return initialise(args, os);
   }
@@ -92,11 +92,21 @@
 }
 
 template <typename TInput, typename TOutput, class OutputStage>
-UniquePoolingCommon<TInput, TOutput, OutputStage> pooling(const PoolingArgs &args, const OutputStage &os)
+UniquePoolingCommon<TInput, TOutput> pooling(const PoolingArgs &args, const OutputStage &os)
 {
   const PoolingImplementation<TInput, TOutput, OutputStage> *impl = nullptr;
   const bool success = find_implementation<TInput, TOutput, OutputStage>(args, os, impl);
-  return UniquePoolingCommon<TInput, TOutput, OutputStage>(success ? impl->get_instance(args, os) : nullptr);
+  return UniquePoolingCommon<TInput, TOutput>(success ? impl->get_instance(args, os) : nullptr);
+}
+
+template <class Strategy>
+bool is_supported(const PoolingArgs &args, const Nothing &)
+{
+  return ((args.pool_type == Strategy::pooling_type) &&
+          (args.pool_window.rows == Strategy::pool_rows) &&
+          (args.pool_window.cols == Strategy::pool_cols) &&
+          (args.pool_stride.rows == Strategy::stride_rows) &&
+          (args.pool_stride.cols == Strategy::stride_cols));
 }
 
 }  //  namespace pooling
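
Since the relocated is_supported reads pooling_type, pool_rows, pool_cols, stride_rows and
stride_cols without parentheses, each strategy is expected to expose them as compile-time
constants. An illustrative stub of that interface (an assumption, not an actual kernel
class; it presumes the arm_conv::pooling headers are included for PoolingType):

    // Illustrative strategy stub: the constants is_supported<Strategy> compares
    // against the window, stride and pooling type requested in PoolingArgs.
    struct example_2x2_s1_strategy
    {
      static constexpr auto pooling_type = PoolingType::MAX;
      static constexpr unsigned int pool_rows   = 2;
      static constexpr unsigned int pool_cols   = 2;
      static constexpr unsigned int stride_rows = 1;
      static constexpr unsigned int stride_cols = 1;
    };

    // is_supported<example_2x2_s1_strategy>(args, Nothing{}) holds only when the
    // requested pooling type, window shape and stride all match these constants.
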
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
index 1cad674..0867abc 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,9 +31,7 @@
 #include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
 #include "kernels/sve_s8_nhwc_avg_generic_depthfirst.hpp"
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE2)
 #include "kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
 #include "kernels/sve_s8_nhwc_max_generic_depthfirst.hpp"
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
@@ -47,19 +45,6 @@
 namespace arm_conv {
 namespace pooling {
 
-namespace
-{
-  template <class Strategy>
-  bool is_supported(const PoolingArgs &args, const Nothing &)
-  {
-    return ((args.pool_type == Strategy::pooling_type()) &&
-            (args.pool_window.rows == Strategy::pool_rows()) &&
-            (args.pool_window.cols == Strategy::pool_cols()) &&
-            (args.pool_stride.rows == Strategy::stride_rows()) &&
-            (args.pool_stride.cols == Strategy::stride_cols()));
-  }
-}
-
 static const PoolingImplementation<int8_t, int8_t> pooling_s8_methods[] = {
   {
     PoolingMethod::DEPTHFIRST,
@@ -69,40 +54,47 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<int8_t>>(args);
+      auto strat = new cpp_nhwc_1x1_stride_any_depthfirst<int8_t>(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
     },
   },
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
-  {
-    PoolingMethod::DEPTHFIRST,
-    "sve_s8_nhwc_avg_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_sve2() && args.pool_type == PoolingType::AVERAGE; },
-    nullptr,
-    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirstGeneric<sve_s8_nhwc_avg_generic_depthfirst>(args);
-    },
-  },
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE2)
   {
     PoolingMethod::DEPTHFIRST,
     "sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_sve() && is_supported<sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args, unused);
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sve() &&
+             is_supported<sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirst<sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<int8_t>(strat, args);
+    },
+  },
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_s8_nhwc_avg_generic_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sve2() && args.pool_type == PoolingType::AVERAGE;
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sve_s8_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_s8_nhwc_max_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_sve() && args.pool_type == PoolingType::MAX; },
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sve() && args.pool_type == PoolingType::MAX;
+    },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirstGeneric<sve_s8_nhwc_max_generic_depthfirst>(args);
+      auto strat = new sve_s8_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
     },
   },
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
@@ -112,7 +104,8 @@
     is_supported<a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst>,
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirst<a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<int8_t>(strat, args);
     },
   },
   {
@@ -121,7 +114,8 @@
     [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::AVERAGE; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirstGeneric<a64_s8_nhwc_avg_generic_depthfirst>(args);
+      auto strat = new a64_s8_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
     },
   },
   {
@@ -130,7 +124,8 @@
     [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<int8_t, int8_t> * {
-      return new PoolingDepthfirstGeneric<a64_s8_nhwc_max_generic_depthfirst>(args);
+      auto strat = new a64_s8_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t>(strat, args);
     },
   },
 #endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
index bfc4dc0..6209f7c 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_s8q.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,13 +25,13 @@
 #include "arm_gemm_local.hpp"
 
 #include "pooling_implementation.hpp"
-#include "pooling_depthfirst_generic_quantized.hpp"
+#include "pooling_depthfirst_generic.hpp"
 
 #if defined(__aarch64__)
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_s8q_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/sve_s8q_nhwc_max_generic_depthfirst.hpp"
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/a64_s8q_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/a64_s8q_nhwc_max_generic_depthfirst.hpp"
 #endif  // defined(__aarch64__)
@@ -41,9 +41,9 @@
 namespace arm_conv {
 namespace pooling {
 
-static const PoolingImplementation<int8_t, int8_t, Requantize32> pooling_u8_methods[] = {
+static const PoolingImplementation<int8_t, int8_t, Requantize32> pooling_s8q_methods[] = {
 #if defined(__aarch64__)
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
     "sve_s8q_nhwc_avg_generic_depthfirst",
@@ -51,20 +51,24 @@
       return args.cpu_info->has_sve2() && args.pool_type == PoolingType::AVERAGE;
     },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<sve_s8q_nhwc_avg_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sve_s8q_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t, int8_t, Requantize32>(strat, args, rq);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_s8q_nhwc_max_generic_depthfirst",
-    [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.cpu_info->has_sve2() && args.pool_type == PoolingType::MAX; },
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.cpu_info->has_sve2() && args.pool_type == PoolingType::MAX;
+    },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<sve_s8q_nhwc_max_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new sve_s8q_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t, int8_t, Requantize32>(strat, args, rq);
     },
   },
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
     "a64_s8q_nhwc_avg_generic_depthfirst",
@@ -72,8 +76,9 @@
       return args.pool_type == PoolingType::AVERAGE;
     },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<a64_s8q_nhwc_avg_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new a64_s8q_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t, int8_t, Requantize32>(strat, args, rq);
     },
   },
   {
@@ -81,8 +86,9 @@
     "a64_s8q_nhwc_max_generic_depthfirst",
     [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.pool_type == PoolingType::MAX; },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<a64_s8q_nhwc_max_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<int8_t, int8_t> * {
+      auto strat = new a64_s8q_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<int8_t, int8_t, Requantize32>(strat, args, rq);
     },
   },
 #endif  // defined(__aarch64__)
@@ -92,10 +98,10 @@
 template <>
 const PoolingImplementation<int8_t, int8_t, Requantize32> *pooling_implementation_list()
 {
-  return pooling_u8_methods;
+  return pooling_s8q_methods;
 }
 
-template UniquePoolingCommon<int8_t, int8_t, Requantize32> pooling(const PoolingArgs &, const Requantize32 &);
+template UniquePoolingCommon<int8_t, int8_t> pooling(const PoolingArgs &, const Requantize32 &);
 
 }  //  namespace pooling
 }  //  namespace arm_conv
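
With pooling_depthfirst_generic_quantized.hpp gone, requantization parameters travel through
the same generic driver: each entry builds a PoolingDepthfirstGeneric<int8_t, int8_t,
Requantize32> and the Requantize32 instance is forwarded to the kernel by the matching
Invoker specialisation. A hedged caller-side sketch (the helper name, include path and
example offset are assumptions):

    #include "pooling.hpp"  // assumed include path to the arm_conv::pooling declarations

    using namespace arm_conv::pooling;

    // Illustrative only: request an s8 requantized average pool.
    UniquePoolingCommon<int8_t, int8_t> make_s8q_avg_pool(const PoolingArgs &args)
    {
      Requantize32 rq{};
      rq.input_offset = -128;  // assumed example zero-point

      // Walks pooling_s8q_methods and returns the first matching implementation.
      return pooling<int8_t, int8_t>(args, rq);
    }
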
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
index f6ea980..b0c908a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,9 +31,7 @@
 #include "kernels/cpp_nhwc_1x1_stride_any_depthfirst.hpp"
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
 #include "kernels/sve_u8_nhwc_avg_generic_depthfirst.hpp"
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE2)
 #include "kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst.hpp"
 #include "kernels/sve_u8_nhwc_max_generic_depthfirst.hpp"
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
@@ -47,19 +45,6 @@
 namespace arm_conv {
 namespace pooling {
 
-namespace
-{
-  template <class Strategy>
-  bool is_supported(const PoolingArgs &args, const Nothing &)
-  {
-    return ((args.pool_type == Strategy::pooling_type()) &&
-            (args.pool_window.rows == Strategy::pool_rows()) &&
-            (args.pool_window.cols == Strategy::pool_cols()) &&
-            (args.pool_stride.rows == Strategy::stride_rows()) &&
-            (args.pool_stride.cols == Strategy::stride_cols()));
-  }
-}
-
 static const PoolingImplementation<uint8_t, uint8_t> pooling_u8_methods[] = {
   {
     PoolingMethod::DEPTHFIRST,
@@ -69,12 +54,25 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirstGeneric<cpp_nhwc_1x1_stride_any_depthfirst<uint8_t>>(args);
+      auto strat = new cpp_nhwc_1x1_stride_any_depthfirst<uint8_t>(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
     },
   },
 #if defined(__aarch64__)
 #if defined(ARM_COMPUTE_ENABLE_SVE)
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
+  {
+    PoolingMethod::DEPTHFIRST,
+    "sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst",
+    [] (const PoolingArgs &args, const Nothing &os) -> bool {
+      return args.cpu_info->has_sve() &&
+             is_supported<sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args, os);
+    },
+    nullptr,
+    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<uint8_t>(strat, args);
+    },
+  },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_u8_nhwc_avg_generic_depthfirst",
@@ -82,35 +80,28 @@
       // This kernel can only be used when there is either no padding, or we don't care
       // about the value of the padding. Otherwise, we would need to pass in the zero-point
       // for the quantization regime.
-      return args.cpu_info->has_sve2() && (args.exclude_padding ||
+      return (args.exclude_padding ||
               (args.padding.top == 0 && args.padding.bottom == 0 &&
                args.padding.left == 0 && args.padding.right == 0)
-              ) && args.pool_type == PoolingType::AVERAGE;
+              ) && args.pool_type == PoolingType::AVERAGE &&
+             args.cpu_info->has_sve2();
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirstGeneric<sve_u8_nhwc_avg_generic_depthfirst>(args);
-    },
-  },
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE2)
-  {
-    PoolingMethod::DEPTHFIRST,
-    "sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &unused) -> bool {
-      return args.cpu_info->has_sve() && is_supported<sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args, unused);
-    },
-    nullptr,
-    [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirst<sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new sve_u8_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_u8_nhwc_max_generic_depthfirst",
-    [] (const PoolingArgs &args, const Nothing &) -> bool { return args.cpu_info->has_sve() && args.pool_type == PoolingType::MAX; },
+    [] (const PoolingArgs &args, const Nothing &) -> bool {
+      return args.cpu_info->has_sve() && args.pool_type == PoolingType::MAX;
+    },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirstGeneric<sve_u8_nhwc_max_generic_depthfirst>(args);
+      auto strat = new sve_u8_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
     },
   },
 #endif  // defined(ARM_COMPUTE_ENABLE_SVE)
@@ -120,7 +111,8 @@
     is_supported<a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst>,
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirst<a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst>(args);
+      auto strat = new a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst(args.cpu_info);
+      return new PoolingDepthfirst<uint8_t>(strat, args);
     },
   },
   {
@@ -137,7 +129,8 @@
     },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirstGeneric<a64_u8_nhwc_avg_generic_depthfirst>(args);
+      auto strat = new a64_u8_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
     },
   },
   {
@@ -146,7 +139,8 @@
     [] (const PoolingArgs &args, const Nothing &) -> bool { return args.pool_type == PoolingType::MAX; },
     nullptr,
     [] (const PoolingArgs &args, const Nothing &) -> PoolingCommon<uint8_t, uint8_t> * {
-      return new PoolingDepthfirstGeneric<a64_u8_nhwc_max_generic_depthfirst>(args);
+      auto strat = new a64_u8_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t>(strat, args);
     },
   },
 #endif  // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
index 647e319..de0420a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,13 +25,13 @@
 #include "arm_gemm_local.hpp"
 
 #include "pooling_implementation.hpp"
-#include "pooling_depthfirst_generic_quantized.hpp"
+#include "pooling_depthfirst_generic.hpp"
 
 #if defined(__aarch64__)
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/sve_u8q_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/sve_u8q_nhwc_max_generic_depthfirst.hpp"
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
 #include "kernels/a64_u8q_nhwc_avg_generic_depthfirst.hpp"
 #include "kernels/a64_u8q_nhwc_max_generic_depthfirst.hpp"
 #endif  // defined(__aarch64__)
@@ -41,9 +41,9 @@
 namespace arm_conv {
 namespace pooling {
 
-static const PoolingImplementation<uint8_t, uint8_t, Requantize32> pooling_u8_methods[] = {
+static const PoolingImplementation<uint8_t, uint8_t, Requantize32> pooling_u8q_methods[] = {
 #if defined(__aarch64__)
-#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
     "sve_u8q_nhwc_avg_generic_depthfirst",
@@ -51,20 +51,24 @@
       return args.cpu_info->has_sve2() && args.pool_type == PoolingType::AVERAGE;
     },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<sve_u8q_nhwc_avg_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sve_u8q_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t, uint8_t, Requantize32>(strat, args, rq);
     },
   },
   {
     PoolingMethod::DEPTHFIRST,
     "sve_u8q_nhwc_max_generic_depthfirst",
-    [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.cpu_info->has_sve2() && args.pool_type == PoolingType::MAX; },
+    [] (const PoolingArgs &args, const Requantize32 &) -> bool {
+      return args.cpu_info->has_sve2() && args.pool_type == PoolingType::MAX;
+    },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<sve_u8q_nhwc_max_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new sve_u8q_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t, uint8_t, Requantize32>(strat, args, rq);
     },
   },
-#endif  // defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif  // defined(ARM_COMPUTE_ENABLE_SVE)
   {
     PoolingMethod::DEPTHFIRST,
     "a64_u8q_nhwc_avg_generic_depthfirst",
@@ -72,8 +76,9 @@
       return args.pool_type == PoolingType::AVERAGE;
     },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<a64_u8q_nhwc_avg_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new a64_u8q_nhwc_avg_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t, uint8_t, Requantize32>(strat, args, rq);
     },
   },
   {
@@ -81,8 +86,9 @@
     "a64_u8q_nhwc_max_generic_depthfirst",
     [] (const PoolingArgs &args, const Requantize32 &) -> bool { return args.pool_type == PoolingType::MAX; },
     nullptr,
-    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t, Requantize32> * {
-      return new PoolingDepthfirstGenericQuantized<a64_u8q_nhwc_max_generic_depthfirst>(args, rq);
+    [] (const PoolingArgs &args, const Requantize32 &rq) -> PoolingCommon<uint8_t, uint8_t> * {
+      auto strat = new a64_u8q_nhwc_max_generic_depthfirst(args.cpu_info);
+      return new PoolingDepthfirstGeneric<uint8_t, uint8_t, Requantize32>(strat, args, rq);
     },
   },
 #endif  // defined(__aarch64__)
@@ -92,10 +98,10 @@
 template <>
 const PoolingImplementation<uint8_t, uint8_t, Requantize32> *pooling_implementation_list()
 {
-  return pooling_u8_methods;
+  return pooling_u8q_methods;
 }
 
-template UniquePoolingCommon<uint8_t, uint8_t, Requantize32> pooling(const PoolingArgs &, const Requantize32 &);
+template UniquePoolingCommon<uint8_t, uint8_t> pooling(const PoolingArgs &, const Requantize32 &);
 
 }  //  namespace pooling
 }  //  namespace arm_conv
diff --git a/src/core/NEON/kernels/assembly/pool_common.hpp b/src/core/NEON/kernels/assembly/pool_common.hpp
index b6a0a0a..599e18a 100644
--- a/src/core/NEON/kernels/assembly/pool_common.hpp
+++ b/src/core/NEON/kernels/assembly/pool_common.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,8 +23,9 @@
  */
 
 #pragma once
-
-#include "common.hpp"
+#ifdef CYCLE_PROFILING
+#include "profiler.hpp"
+#endif
 
 namespace arm_conv
 {
@@ -53,6 +54,11 @@
     unsigned int rows, cols;
 };
 
+struct PaddingValues
+{
+    unsigned int left, top, right, bottom;
+};
+
 class IPoolingCommon
 {
 public:
@@ -60,6 +66,7 @@
 
     // Determine the amount of working space required.
     virtual size_t get_working_size(unsigned int num_threads) const = 0;
+    virtual size_t get_working_size(unsigned int num_threads, unsigned int n_channels) const = 0;
 
     // Execute pooling over the specified area of memory.
     virtual void execute(
@@ -103,14 +110,5 @@
         unsigned int num_threads) const = 0;
 };
 
-struct Nothing
-{
-};
-
-template <typename TInput, typename TOutput, class OutputStage = Nothing>
-class PoolingCommon : public IPoolingCommon
-{
-};
-
 } // namespace pooling
 } // namespace arm_conv
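
IPoolingCommon now exposes a channel-aware get_working_size(num_threads, n_channels) overload
alongside the thread-only one; PoolingCommon in the pooling.hpp diff below maps the latter onto
the former using the channel count stored in its arguments. A minimal allocation sketch,
assuming pool is any implementation of this interface:

    #include <cstdint>
    #include <vector>

    // Illustrative helper: PoolT stands in for any arm_conv::pooling::IPoolingCommon.
    template <typename PoolT>
    std::vector<uint8_t> allocate_scratch(const PoolT &pool, unsigned int n_threads, unsigned int n_channels)
    {
      // The two-argument overload sizes the buffer for an explicit channel count;
      // pool.get_working_size(n_threads) falls back to the operator's own channel count.
      return std::vector<uint8_t>(pool.get_working_size(n_threads, n_channels));
    }
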
diff --git a/src/core/NEON/kernels/assembly/pooling.hpp b/src/core/NEON/kernels/assembly/pooling.hpp
index 2325bd0..1b47853 100644
--- a/src/core/NEON/kernels/assembly/pooling.hpp
+++ b/src/core/NEON/kernels/assembly/pooling.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,8 +27,6 @@
 #include "arm_gemm_local.hpp"
 #include "pool_common.hpp"
 
-#include <memory>
-
 namespace arm_conv
 {
 namespace pooling
@@ -89,6 +87,10 @@
     }
 };
 
+struct Nothing
+{
+};
+
 struct Requantize32
 {
     int32_t input_offset  = 0;
@@ -106,12 +108,124 @@
     }
 };
 
-template <typename TInput, typename TOutput, class OutputStage = Nothing>
-using UniquePoolingCommon = std::unique_ptr<PoolingCommon<TInput, TOutput, OutputStage>>;
+template <typename TInput, typename TOutput>
+class PoolingCommon : public IPoolingCommon
+{
+protected:
+    const PoolingArgs m_args;
+
+public:
+    PoolingCommon(const PoolingArgs &args)
+        : m_args(args)
+    {
+    }
+    PoolingCommon(PoolingCommon &) = delete;
+    PoolingCommon &operator=(PoolingCommon &) = delete;
+
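+    // The two-argument overload is provided by the derived class; the single-argument
+    // overload defaults the channel count to the value held in the stored arguments.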
+    size_t get_working_size(unsigned int, unsigned int) const override = 0;
+    size_t get_working_size(unsigned int n_threads) const override
+    {
+        return this->get_working_size(n_threads, m_args.n_channels);
+    }
+
+    // Execute pooling over the specified area of memory.
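+    // Strides are derived from the dense NHWC layout implied by the stored arguments.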
+    void execute(
+        const void *const input,
+        void *const       output,
+        void             *working_space,
+        unsigned int      thread_id,
+        unsigned int      num_threads) const override
+    {
+        this->execute(
+            input,
+            m_args.n_channels,
+            m_args.n_channels * m_args.input_cols,
+            m_args.n_channels * m_args.input_cols * m_args.input_rows,
+            output,
+            m_args.n_channels,
+            m_args.n_channels * m_args.output_cols,
+            m_args.n_channels * m_args.output_cols * m_args.output_rows,
+            working_space,
+            thread_id, num_threads);
+    }
+
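+    // Execute pooling with caller-supplied strides; the tensor geometry is taken
+    // from the stored arguments.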
+    void execute(
+        const void *const input,
+        size_t            ld_input_col,
+        size_t            ld_input_row,
+        size_t            ld_input_batch,
+        void *const       output,
+        size_t            ld_output_col,
+        size_t            ld_output_row,
+        size_t            ld_output_batch,
+        void             *working_space,
+        unsigned int      thread_id,
+        unsigned int      num_threads) const override
+    {
+        this->execute(
+            m_args.n_batches, m_args.input_rows, m_args.input_cols, m_args.n_channels,
+            input, ld_input_col, ld_input_row, ld_input_batch,
+            m_args.padding, m_args.output_rows, m_args.output_cols,
+            output, ld_output_col, ld_output_row, ld_output_batch,
+            working_space, thread_id, num_threads);
+    }
+
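+    // Execute pooling with fully specified geometry and strides; forwards to the
+    // implementation-provided execute_internal().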
+    void execute(
+        unsigned int         batches,
+        unsigned int         height,
+        unsigned int         width,
+        unsigned int         channels,
+        const void *const    input,
+        size_t               ld_input_col,
+        size_t               ld_input_row,
+        size_t               ld_input_batch,
+        const PaddingValues &padding,
+        unsigned int         output_height,
+        unsigned int         output_width,
+        void *const          output,
+        size_t               ld_output_col,
+        size_t               ld_output_row,
+        size_t               ld_output_batch,
+        void                *working_space,
+        unsigned int         thread_id,
+        unsigned int         num_threads) const override
+    {
+        this->execute_internal(
+            batches, height, width, channels, padding,
+            input, ld_input_col, ld_input_row, ld_input_batch,
+            output_height, output_width,
+            output, ld_output_col, ld_output_row, ld_output_batch,
+            working_space, thread_id, num_threads);
+    }
+
+protected:
+    virtual void execute_internal(
+        unsigned int batches,
+        unsigned int height,
+        unsigned int width,
+        unsigned int channels,
+        const PaddingValues &,
+        const void *const input,
+        size_t            ld_input_col,
+        size_t            ld_input_row,
+        size_t            ld_input_batch,
+        unsigned int      output_height,
+        unsigned int      output_width,
+        void *const       output,
+        size_t            ld_output_col,
+        size_t            ld_output_row,
+        size_t            ld_output_batch,
+        void             *working_space,
+        unsigned int      thread_id,
+        unsigned int      num_threads) const = 0;
+};
+
+template <typename TInput, typename TOutput>
+using UniquePoolingCommon = std::unique_ptr<PoolingCommon<TInput, TOutput>>;
 
 // Get a pooling engine
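+// Illustrative call sequence (a sketch only; args, rq, buffers and thread indices
+// are placeholders), shown here for the quantized uint8_t specialisation:
+//
+//   auto pool = pooling<uint8_t>(args, rq);   // rq is a Requantize32
+//   std::vector<uint8_t> working_space(pool->get_working_size(n_threads));
+//   pool->execute(input, output, working_space.data(), thread_id, n_threads);
+//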
 template <typename TInput, typename TOutput = TInput, class OutputStage = Nothing>
-UniquePoolingCommon<TInput, TOutput, OutputStage> pooling(const PoolingArgs &, const OutputStage & = {});
+UniquePoolingCommon<TInput, TOutput> pooling(const PoolingArgs &, const OutputStage & = {});
 
 } // namespace pooling
 } // namespace arm_conv
diff --git a/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp b/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
index 77428b5..10ff418 100644
--- a/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
+++ b/src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
@@ -156,7 +156,7 @@
 
     const auto in_ptr        = src->buffer() + src->info()->offset_first_element_in_bytes();
     auto       out_ptr       = dst->buffer() + dst->info()->offset_first_element_in_bytes();
-    auto       working_space = workspace->buffer() + workspace->info()->offset_first_element_in_bytes();
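+    // The workspace tensor may be absent, in which case a null working-space pointer is used.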
+    auto       working_space = (workspace == nullptr) ? nullptr : workspace->buffer() + workspace->info()->offset_first_element_in_bytes();
 
     const auto src_shape   = src->info()->tensor_shape();
     const auto dst_shape   = dst->info()->tensor_shape();
@@ -197,7 +197,7 @@
     arm_conv::pooling::PoolingStride stride{};
     std::tie(stride.cols, stride.rows) = info.pad_stride_info.stride();
 
-    const arm_conv::PaddingValues padding{ info.pad_stride_info.pad_left(), info.pad_stride_info.pad_top(), info.pad_stride_info.pad_right(), info.pad_stride_info.pad_bottom() };
+    const arm_conv::pooling::PaddingValues padding{ info.pad_stride_info.pad_left(), info.pad_stride_info.pad_top(), info.pad_stride_info.pad_right(), info.pad_stride_info.pad_bottom() };
 
     constexpr unsigned int idx_width    = 1;
     constexpr unsigned int idx_height   = 2;
@@ -236,7 +236,7 @@
     arm_conv::pooling::PoolingStride stride{};
     std::tie(stride.cols, stride.rows) = info.pad_stride_info.stride();
 
-    const arm_conv::PaddingValues padding{ info.pad_stride_info.pad_left(), info.pad_stride_info.pad_top(), info.pad_stride_info.pad_right(), info.pad_stride_info.pad_bottom() };
+    const arm_conv::pooling::PaddingValues padding{ info.pad_stride_info.pad_left(), info.pad_stride_info.pad_top(), info.pad_stride_info.pad_right(), info.pad_stride_info.pad_bottom() };
 
     constexpr unsigned int idx_width    = 1;
     constexpr unsigned int idx_height   = 2;