COMPMID-2063: New Winograd implementation

Refactoring of the Winograd code, reducing the size of the binaries by
about 8X.
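
Most of the saving comes from replacing the per-padding template
specialisations of the tile transforms (the tilefn_*_padded tables in
the deleted files below) with padding handled at run time, as in the
new padding.cpp. A minimal sketch of the trade-off follows;
transform_tile and transform_tile_specialised are hypothetical names
used for illustration, not the library API:

    // Old scheme: one template instantiation per padding combination,
    // so the same transform loop is stamped out dozens of times in the
    // binary (see the deleted tilefn_right_padded tables below).
    template <int PadTop, int PadLeft, int PadBottom, int PadRight>
    void transform_tile_specialised(int n_channels)
    {
      (void) n_channels;
      // ... transform loop, duplicated once per specialisation ...
    }

    // New scheme: a single function taking the padding as run-time
    // arguments, so one copy of the loop serves every padding case.
    void transform_tile(int n_channels, int pad_top, int pad_left,
                        int pad_bottom, int pad_right)
    {
      (void) n_channels; (void) pad_top; (void) pad_left;
      (void) pad_bottom; (void) pad_right;
      // ... same transform loop, compiled exactly once ...
    }

The run-time form trades a few comparisons per tile for a single shared
copy of each transform, consistent with the binary-size reduction noted
above.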

Change-Id: If8845bda324573e1a5cf436f354ac8603e88a92e
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/959
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Anthony Barbier <Anthony.barbier@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp
deleted file mode 100644
index ac83bf9..0000000
--- a/src/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/gemm.hpp"
-
-using namespace winograd;
-
-template <const int MB, const int NB, typename TIn, typename TOut>
-BatchedBlockedGemm<MB, NB, TIn, TOut>::BatchedBlockedGemm(
-  const unsigned int n_gemms,
-  const int M, const int K, const int N,
-  const int a_matrix_stride,
-  const int a_row_stride,
-  const int b_matrix_stride,
-  const int b_row_stride,
-  const int c_matrix_stride,
-  const int c_row_stride,
-  const TIn* const a_ptr,
-  const TIn* const b_ptr,
-  TOut* const c_ptr
-) : n_gemms(n_gemms), M(M), N(N), K(K),
-    a_matrix_stride(a_matrix_stride),
-    a_row_stride(a_row_stride),
-    b_matrix_stride(b_matrix_stride),
-    b_row_stride(b_row_stride),
-    c_matrix_stride(c_matrix_stride),
-    c_row_stride(c_row_stride),
-    a_ptr(a_ptr), b_ptr(b_ptr), c_ptr(c_ptr)
-{
-}
-
-template <const int MBlock, const int NBlock, typename TIn, typename TOut>
-unsigned int BatchedBlockedGemm<MBlock, NBlock, TIn, TOut>::get_window() const
-{
-  return n_gemms;
-}
-
-template <const int MBlock, const int NBlock, typename TIn, typename TOut>
-void BatchedBlockedGemm<MBlock, NBlock, TIn, TOut>::run(
-  const unsigned int start, const unsigned int stop
-)
-{
-  // Perform the specified GEMMs
-  for (unsigned int i = start; i < stop; i++)
-  {
-    // Get pointers to the relevant matrices
-    const TIn* const mtr_a = a_ptr + i*a_matrix_stride;
-    const TIn* const mtr_b = b_ptr + i*b_matrix_stride;
-    TOut* const mtr_c = c_ptr + i*c_matrix_stride;
-
-    // Perform the GEMM
-    BlockedGemm<MBlock, NBlock, TIn, TOut>(
-      mtr_a, mtr_b, mtr_c, M, K, N,
-      a_row_stride, b_row_stride, c_row_stride
-    );
-  }
-}
-
-template class winograd::BatchedBlockedGemm<4, 16, float, float>;
-
diff --git a/src/core/NEON/kernels/convolution/winograd/padding.cpp b/src/core/NEON/kernels/convolution/winograd/padding.cpp
new file mode 100644
index 0000000..46fe57c
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/padding.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <cstring>
+#include <cstdint>
+
+#include "padding.hpp"
+
+namespace padding
+{
+
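+// Copy a (tile_rows x tile_cols x n_channels) tile from inptr to outptr,
+// writing pad_value into the requested top/left/bottom/right border cells
+// and copying the interior cells from the input.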
+template <typename T>
+void copy_and_pad_tile(
+  const unsigned int tile_rows,
+  const unsigned int tile_cols,
+  const unsigned int n_channels,
+  const T* const inptr,
+  const unsigned int in_row_stride,
+  const unsigned int in_col_stride,
+  T* const outptr,
+  const unsigned int out_row_stride,
+  const unsigned int out_col_stride,
+  const unsigned int pad_top,
+  const unsigned int pad_left,
+  const unsigned int pad_bottom,
+  const unsigned int pad_right,
+  const T pad_value
+)
+{
+  for (unsigned int out_i = 0; out_i < tile_rows; out_i++)
+  {
+    for (unsigned int out_j = 0; out_j < tile_cols; out_j++)
+    {
+      T* const output = outptr + out_i*out_row_stride + out_j*out_col_stride;
+
+      if (out_i < pad_top || tile_rows - pad_bottom <= out_i ||
+          out_j < pad_left || tile_cols - pad_right <= out_j)
+      {
+        for (unsigned int n = 0; n < n_channels; n++)
+        {
+          output[n] = pad_value;
+        }
+      }
+      else
+      {
+        const auto in_i = out_i - pad_top, in_j = out_j - pad_left;
+        const T* const input = inptr + in_i*in_row_stride + in_j*in_col_stride;
+        std::memcpy(output, input, n_channels * sizeof(T));
+      }
+    }
+  }
+}
+
+template void copy_and_pad_tile(
+  unsigned int, unsigned int, unsigned int,
+  const uint8_t *, unsigned int, unsigned int,
+  uint8_t *, unsigned int, unsigned int,
+  unsigned int, unsigned int, unsigned int, unsigned int, uint8_t
+);
+
+template void copy_and_pad_tile(
+  unsigned int, unsigned int, unsigned int,
+  const float *, unsigned int, unsigned int,
+  float *, unsigned int, unsigned int,
+  unsigned int, unsigned int, unsigned int, unsigned int, float
+);
+
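+// Copy the unpadded interior of a TileRows x TileCols tile, dropping the
+// given borders. The cell size is passed in bytes and the pointers are
+// untyped, so one instantiation per tile shape serves every data type.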
+template <unsigned int TileRows, unsigned int TileCols>
+void CopyCropped<TileRows, TileCols>::execute(
+  const size_t size,
+  const void * const inptr,
+  const size_t in_row_stride,
+  const size_t in_col_stride,
+  void * const outptr,
+  const size_t out_row_stride,
+  const size_t out_col_stride,
+  const unsigned int pad_top,
+  const unsigned int pad_left,
+  const unsigned int pad_bottom,
+  const unsigned int pad_right
+)
+{
+  for (unsigned int out_i = 0, in_i = pad_top; in_i < TileRows - pad_bottom; out_i++, in_i++)
+  {
+    for (unsigned int out_j = 0, in_j = pad_left; in_j < TileCols - pad_right; out_j++, in_j++)
+    {
+      std::memcpy(
+        static_cast<uint8_t *>(outptr) + out_i*out_row_stride + out_j*out_col_stride,
+        static_cast<const uint8_t *>(inptr) + in_i*in_row_stride + in_j*in_col_stride,
+        size
+      );
+    }
+  }
+}
+
+template class CopyCropped<2, 2>;
+template class CopyCropped<3, 3>;
+template class CopyCropped<4, 4>;
+
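+// Typed variant with run-time tile dimensions: memcpy each interior cell
+// (n_channels elements), skipping crop_* rows and columns at the borders.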
+template <typename T>
+void crop_and_copy_tile(
+  unsigned int tile_rows,
+  unsigned int tile_cols,
+  unsigned int n_channels,
+  const T *inptr,
+  unsigned int in_row_stride,
+  unsigned int in_col_stride,
+  T *outptr,
+  unsigned int out_row_stride,
+  unsigned int out_col_stride,
+  unsigned int crop_top,
+  unsigned int crop_left,
+  unsigned int crop_bottom,
+  unsigned int crop_right
+)
+{
+  for (unsigned int out_i = 0, in_i = crop_top; in_i < tile_rows - crop_bottom; out_i++, in_i++)
+  {
+    for (unsigned int out_j = 0, in_j = crop_left; in_j < tile_cols - crop_right; out_j++, in_j++)
+    {
+      std::memcpy(
+        outptr + out_i*out_row_stride + out_j*out_col_stride,
+        inptr + in_i*in_row_stride + in_j*in_col_stride,
+        sizeof(T) * n_channels
+      );
+    }
+  }
+}
+
+template void crop_and_copy_tile(
+  unsigned int tile_rows,
+  unsigned int tile_cols,
+  unsigned int n_channels,
+  const float *inptr,
+  unsigned int in_row_stride,
+  unsigned int in_col_stride,
+  float *outptr,
+  unsigned int out_row_stride,
+  unsigned int out_col_stride,
+  unsigned int crop_top,
+  unsigned int crop_left,
+  unsigned int crop_bottom,
+  unsigned int crop_right
+);
+
+}  // namespace padding
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp
deleted file mode 100644
index e66300d..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_1x8_fp32.cpp
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_1x8_fp32_process_tile(
-  int n_channels,
-  const float* const input_base,
-  const int input_row_stride,
-  const int input_col_stride,
-  float* const matrix_base,
-  const int matrix_stride,
-  const int _pad_top,
-  const int _pad_left,
-  const int _pad_bottom,
-  const int _pad_right
-)
-{
-  (void) input_row_stride;  // No rows over which to stride
-  (void) _pad_top;  // Never any top padding
-  (void) _pad_bottom;  // Never any bottom padding
-
-  // Extract padding arguments
-  const int pad_left = Specialized ? PadLeft : _pad_left;
-  const int pad_right = Specialized ? PadRight : _pad_right;
-
-  constexpr int inner_tile_cols = 8;
-  const int cells_j = inner_tile_cols - pad_right;
-
-  float *outptr = matrix_base;
-
-  // Get pointers into the input tile
-  const float *x_ptrs[inner_tile_cols];
-  for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
-  {
-    x_ptrs[j] = input_base + xj*input_col_stride;
-  }
-
-  // Vectors used/computed in this kernel.
-  float x[inner_tile_cols];
-  float U[inner_tile_cols];
-
-  for (int j = 0; j < inner_tile_cols; j++)
-  {
-    x[j] = 0.0f;
-  }
-
-  // Perform the Winograd input transformation for each channel in the input
-  // tensor.
-  int channels_remaining = n_channels;
-#ifdef __arm_any__
-  for (; channels_remaining >= 4; channels_remaining -= 4)
-  {
-    float32x4_t x[inner_tile_cols], U[inner_tile_cols];
-    for (int j = 0; j < inner_tile_cols; j++)
-    {
-      x[j] = vdupq_n_f32(0.0f);
-    }
-
-    // Load x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      x[j] = vld1q_f32(x_ptrs[j]);
-      x_ptrs[j] += 4;
-    }
-
-    // Compute U = x . X
-    U[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
-    U[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
-    U[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
-    U[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
-    U[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
-    U[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
-    U[6] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
-    U[7] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
-
-    // Store the transformed vector
-    for (int j = 0; j < inner_tile_cols; j++)
-    {
-      vst1q_f32(outptr + j*matrix_stride, U[j]);
-    }
-    outptr += 4;
-  }
-  for (; channels_remaining >= 2; channels_remaining -= 2)
-  {
-    float32x2_t x[inner_tile_cols], U[inner_tile_cols];
-    for (int j = 0; j < inner_tile_cols; j++)
-    {
-      x[j] = vdup_n_f32(0.0f);
-    }
-
-    // Load x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      x[j] = vld1_f32(x_ptrs[j]);
-      x_ptrs[j] += 2;
-    }
-
-    // Compute U = x . X
-    U[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
-    U[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
-    U[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
-    U[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
-    U[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
-    U[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
-    U[6] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
-    U[7] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
-
-    // Store the transformed vector
-    for (int j = 0; j < inner_tile_cols; j++)
-    {
-      vst1_f32(outptr + j*matrix_stride, U[j]);
-    }
-    outptr += 2;
-  }
-#endif  // __arm_any__
-  for (; channels_remaining; channels_remaining--)
-  {
-    // Load x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      x[j] = *(x_ptrs[j]++);
-    }
-
-    // Compute U = x . X
-    U[0] = x[0]*-36 + x[4]*-14 + x[2]*49 + x[6]*1;
-    U[1] = x[5]*-1 + x[1]*-36 + x[4]*-13 + x[3]*13 + x[2]*36 + x[6]*1;
-    U[2] = x[3]*-13 + x[4]*-13 + x[1]*36 + x[2]*36 + x[5]*1 + x[6]*1;
-    U[3] = x[1]*-18 + x[4]*-10 + x[5]*-2 + x[2]*9 + x[3]*20 + x[6]*1;
-    U[4] = x[3]*-20 + x[4]*-10 + x[5]*2 + x[2]*9 + x[1]*18 + x[6]*1;
-    U[5] = x[1]*-12 + x[4]*-5 + x[5]*-3 + x[2]*4 + x[3]*15 + x[6]*1;
-    U[6] = x[3]*-15 + x[4]*-5 + x[5]*3 + x[2]*4 + x[1]*12 + x[6]*1;
-    U[7] = x[1]*-36 + x[5]*-14 + x[3]*49 + x[7]*1;
-
-    // Store the transformed vector
-    for (int j = 0; j < inner_tile_cols; j++)
-    {
-      *(outptr + j*matrix_stride) = U[j];
-    }
-    outptr++;
-  }
-}
-
-}  // namespace (anonymous)
-
-namespace winograd
-{
-template <int x>
-using Tiles = InputTransformImplTiles<1, x, 1, 8, float>;
-
-/*****************************************************************************/
-// 1x3 specialisations
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_left_padded[n_pad_left] = {
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_right_padded[n_pad_right] = {
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-/*****************************************************************************/
-// 1x5 specialisations
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_left_padded[n_pad_left] = {
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 2, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_right_padded[n_pad_right] = {
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-/*****************************************************************************/
-// 1x7 specialisations
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_generic = winograd_input_transform_1x8_fp32_process_tile<false>;
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_unpadded = winograd_input_transform_1x8_fp32_process_tile<true>;
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_left_padded[n_pad_left] = {
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 1, 0, 0>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 3, 0, 0>,
-};
-
-template <>
-const Tiles<7>::TileFn Tiles<7>::tilefn_right_padded[n_pad_right] = {
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 1>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 2>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 3>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 4>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 5>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 6>,
-  winograd_input_transform_1x8_fp32_process_tile<true, 0, 0, 0, 7>,
-};
-/*****************************************************************************/
-
-
-template class InputTransform<1, 3, 1, 8, float>;
-template class InputTransform<3, 1, 8, 1, float>;
-template class InputTransform<1, 5, 1, 8, float>;
-template class InputTransform<5, 1, 8, 1, float>;
-template class InputTransform<1, 7, 1, 8, float>;
-template class InputTransform<7, 1, 8, 1, float>;
-}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
deleted file mode 100644
index 4203945..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace winograd
-{
-
-using Tiles = InputTransformImplTiles<3, 3, 4, 4, float>;
-
-namespace
-{
-
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_4x4_fp32_process_tile(
-  int n_channels,
-  const float* const input_base,
-  const int input_row_stride,
-  const int input_col_stride,
-  float* const matrix_base,
-  const int matrix_stride,
-  const int _pad_top,
-  const int _pad_left,
-  const int _pad_bottom,
-  const int _pad_right
-)
-{
-  const int pad_top = Specialized ? PadTop : _pad_top;
-  const int pad_left = Specialized ? PadLeft : _pad_left;
-  const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
-  const int pad_right = Specialized ? PadRight : _pad_right;
-
-  constexpr int inner_tile_i = 4, inner_tile_j = 4;
-  const int cells_i = inner_tile_i - pad_bottom;
-  const int cells_j = inner_tile_j - pad_right;
-
-
-
-  float *outptr = matrix_base;
-
-  // Get pointers into the input tile
-  const float *x_ptrs[inner_tile_i][inner_tile_j];
-  for (int i = pad_top, xi = 0; i < cells_i; i++, xi++)
-  {
-    // Get a pointer into the row
-    const float* const row_ptr = input_base + xi*input_row_stride;
-
-    for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
-    {
-      x_ptrs[i][j] = row_ptr + xj*input_col_stride;
-    }
-  }
-
-  // Matrices used/computed in this kernel.
-  float x[inner_tile_i][inner_tile_j];
-  float XTx[inner_tile_i][inner_tile_j];
-  float U[inner_tile_i][inner_tile_j];
-
-  for (int i = 0; i < inner_tile_i; i++)
-  {
-    for (int j = 0; j < inner_tile_j; j++)
-    {
-      x[i][j] = XTx[i][j] = 0.0f;
-    }
-  }
-
-  // Perform the Winograd input transformation for each channel in the input
-  // tensor.
-  int channels_remaining = n_channels;
-#ifdef __aarch64__
-  for (; channels_remaining >= 4; channels_remaining -= 4)
-  {
-    // Matrices used/computed in this kernel.
-    float32x4_t x[inner_tile_i][inner_tile_j];
-    float32x4_t XTx[inner_tile_i][inner_tile_j];
-    float32x4_t U[inner_tile_i][inner_tile_j];
-
-    for (int i = 0; i < inner_tile_i; i++)
-    {
-      for (int j = 0; j < inner_tile_j; j++)
-      {
-        x[i][j] = vdupq_n_f32(0.0f);
-        XTx[i][j] = vdupq_n_f32(0.0f);
-      }
-    }
-
-    // Load x
-    for (int i = pad_top; i < cells_i; i++)
-    {
-      for (int j = pad_left; j < cells_j; j++)
-      {
-        x[i][j] = vld1q_f32(x_ptrs[i][j]);
-        x_ptrs[i][j] += 4;
-      }
-    }
-
-    // Compute XT . x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      // XTx[0][j] = x[0][j] - x[2][j];
-      XTx[0][j] = vsubq_f32(x[0][j], x[2][j]);
-
-      // XTx[1][j] = x[1][j] + x[2][j];
-      XTx[1][j] = vaddq_f32(x[1][j], x[2][j]);
-
-      // XTx[2][j] = x[2][j] - x[1][j];
-      XTx[2][j] = vsubq_f32(x[2][j], x[1][j]);
-
-      // XTx[3][j] = x[1][j] - x[3][j];
-      XTx[3][j] = vsubq_f32(x[1][j], x[3][j]);
-    }
-
-    // Compute U = XT . x . X
-    for (int i = 0; i < inner_tile_i; i++)
-    {
-      // U[i][0] = XTx[i][0] - XTx[i][2];
-      U[i][0] = vsubq_f32(XTx[i][0], XTx[i][2]);
-
-      // U[i][1] = XTx[i][1] + XTx[i][2];
-      U[i][1] = vaddq_f32(XTx[i][1], XTx[i][2]);
-
-      // U[i][2] = XTx[i][2] - XTx[i][1];
-      U[i][2] = vsubq_f32(XTx[i][2], XTx[i][1]);
-
-      // U[i][3] = XTx[i][1] - XTx[i][3];
-      U[i][3] = vsubq_f32(XTx[i][1], XTx[i][3]);
-    }
-
-    // Store the transformed matrix
-    for (int i = 0, m = 0; i < inner_tile_i; i++)
-    {
-      for (int j = 0; j < inner_tile_j; j++, m++)
-      {
-        vst1q_f32(outptr + m*matrix_stride, U[i][j]);
-      }
-    }
-    outptr += 4;
-  }
-#endif  // __aarch64__
-#ifdef __arm_any__
-  for (; channels_remaining >= 2; channels_remaining -= 2)
-  {
-    // Matrices used/computed in this kernel.
-    float32x2_t x[inner_tile_i][inner_tile_j];
-    float32x2_t XTx[inner_tile_i][inner_tile_j];
-    float32x2_t U[inner_tile_i][inner_tile_j];
-
-    for (int i = 0; i < inner_tile_i; i++)
-    {
-      for (int j = 0; j < inner_tile_j; j++)
-      {
-        x[i][j] = vdup_n_f32(0.0f);
-        XTx[i][j] = vdup_n_f32(0.0f);
-      }
-    }
-
-    // Load x
-    for (int i = pad_top; i < cells_i; i++)
-    {
-      for (int j = pad_left; j < cells_j; j++)
-      {
-        x[i][j] = vld1_f32(x_ptrs[i][j]);
-        x_ptrs[i][j] += 2;
-      }
-    }
-
-    // Compute XT . x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      // XTx[0][j] = x[0][j] - x[2][j];
-      XTx[0][j] = vsub_f32(x[0][j], x[2][j]);
-
-      // XTx[1][j] = x[1][j] + x[2][j];
-      XTx[1][j] = vadd_f32(x[1][j], x[2][j]);
-
-      // XTx[2][j] = x[2][j] - x[1][j];
-      XTx[2][j] = vsub_f32(x[2][j], x[1][j]);
-
-      // XTx[3][j] = x[1][j] - x[3][j];
-      XTx[3][j] = vsub_f32(x[1][j], x[3][j]);
-    }
-
-    // Compute U = XT . x . X
-    for (int i = 0; i < inner_tile_i; i++)
-    {
-      // U[i][0] = XTx[i][0] - XTx[i][2];
-      U[i][0] = vsub_f32(XTx[i][0], XTx[i][2]);
-
-      // U[i][1] = XTx[i][1] + XTx[i][2];
-      U[i][1] = vadd_f32(XTx[i][1], XTx[i][2]);
-
-      // U[i][2] = XTx[i][2] - XTx[i][1];
-      U[i][2] = vsub_f32(XTx[i][2], XTx[i][1]);
-
-      // U[i][3] = XTx[i][1] - XTx[i][3];
-      U[i][3] = vsub_f32(XTx[i][1], XTx[i][3]);
-    }
-
-    // Store the transformed matrix
-    for (int i = 0, m = 0; i < inner_tile_i; i++)
-    {
-      for (int j = 0; j < inner_tile_j; j++, m++)
-      {
-        vst1_f32(outptr + m*matrix_stride, U[i][j]);
-      }
-    }
-    outptr += 2;
-  }
-#endif  // __arm_any__
-  for (; channels_remaining; channels_remaining--)
-  {
-    // Load x
-    for (int i = pad_top; i < cells_i; i++)
-    {
-      for (int j = pad_left; j < cells_j; j++)
-      {
-        x[i][j] = *(x_ptrs[i][j]++);
-      }
-    }
-
-    // Compute XT . x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      XTx[0][j] = x[0][j] - x[2][j];
-      XTx[1][j] = x[1][j] + x[2][j];
-      XTx[2][j] = x[2][j] - x[1][j];
-      XTx[3][j] = x[1][j] - x[3][j];
-    }
-
-    // Compute U = XT . x . X
-    for (int i = 0; i < inner_tile_i; i++)
-    {
-      U[i][0] = XTx[i][0] - XTx[i][2];
-      U[i][1] = XTx[i][1] + XTx[i][2];
-      U[i][2] = XTx[i][2] - XTx[i][1];
-      U[i][3] = XTx[i][1] - XTx[i][3];
-    }
-
-    // Store the transformed matrix
-    for (int i = 0, m = 0; i < inner_tile_i; i++)
-    {
-      for (int j = 0; j < inner_tile_j; j++, m++)
-      {
-        *(outptr + m*matrix_stride) = U[i][j];
-      }
-    }
-    outptr++;
-  }
-}
-
-}  // namespace (anonymous)
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_input_transform_4x4_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_input_transform_4x4_fp32_process_tile<true>;
-
-
-template <>
-const Tiles::TileFn Tiles::tilefn_top_padded[n_pad_top] = {
-  winograd_input_transform_4x4_fp32_process_tile<true, 1, 0, 0, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_left_padded[n_pad_left] = {
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 1, 0>,
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 2, 0>,
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 3, 0>,
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 4, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 1>,
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 2>,
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 3>,
-  winograd_input_transform_4x4_fp32_process_tile<true, 0, 0, 0, 4>,
-};
-
-template class InputTransform<3, 3, 4, 4, float>;
-}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp
deleted file mode 100644
index 893122c..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/input_6x6_fp32.cpp
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/input.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadTop=0, int PadLeft=0, int PadBottom=0, int PadRight=0>
-void winograd_input_transform_6x6_fp32_process_tile(
-  int n_channels,
-  const float* const input_base,
-  const int input_row_stride,
-  const int input_col_stride,
-  float* const matrix_base,
-  const int matrix_stride,
-  const int _pad_top,
-  const int _pad_left,
-  const int _pad_bottom,
-  const int _pad_right
-)
-{
-  const int pad_top = Specialized ? PadTop : _pad_top;
-  const int pad_left = Specialized ? PadLeft : _pad_left;
-  const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
-  const int pad_right = Specialized ? PadRight : _pad_right;
-
-  constexpr int inner_tile_rows = 6;
-  constexpr int inner_tile_cols = 6;
-
-  const int cells_i = inner_tile_rows - pad_bottom;
-  const int cells_j = inner_tile_cols - pad_right;
-
-  float *outptr = matrix_base;
-
-  // Get pointers into the input tile
-  const float *x_ptrs[inner_tile_rows][inner_tile_cols];
-  for (int i = pad_top, xi = 0; i < cells_i; i++, xi++)
-  {
-    // Get a pointer into the row
-    const float* const row_ptr = input_base + xi*input_row_stride;
-
-    for (int j = pad_left, xj = 0; j < cells_j; j++, xj++)
-    {
-      x_ptrs[i][j] = row_ptr + xj*input_col_stride;
-    }
-  }
-
-  // Matrices used/computed in this kernel.
-  float x[inner_tile_rows][inner_tile_cols];
-  float XTx[inner_tile_rows][inner_tile_cols];
-  float U[inner_tile_rows][inner_tile_cols];
-  for (int i = 0; i < inner_tile_rows; i++)
-  {
-    for (int j = 0; j < inner_tile_cols; j++)
-    {
-      x[i][j] = XTx[i][j] = 0.0f;
-    }
-  }
-
-  // Perform the Winograd input transformation for each channel in the input
-  // tensor.
-  int channels_remaining = n_channels;
-#ifdef __aarch64__
-  for (; channels_remaining >= 4; channels_remaining -= 4)
-  {
-    // Matrices used/computed in this kernel
-    float32x4_t x[inner_tile_rows][inner_tile_cols];
-    float32x4_t XTx[inner_tile_rows][inner_tile_cols];
-    float32x4_t U[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
-    {
-      for (int j = 0; j < inner_tile_cols; j++)
-      {
-        x[i][j] = vdupq_n_f32(0.0f);
-        XTx[i][j] = vdupq_n_f32(0.0f);
-      }
-    }
-
-    // Read a 6x6 tile of the input
-    for (int i = pad_top; i < cells_i; i++)
-    {
-      for (int j = pad_left; j < cells_j; j++)
-      {
-        x[i][j] = vld1q_f32(x_ptrs[i][j]);
-        x_ptrs[i][j] += 4;
-      }
-    }
-
-    // Compute XT . x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      // XTx[0][j] =  4*x[0][j] + -5*x[2][j] +  1*x[4][j];
-      XTx[0][j] = vmlsq_n_f32(vmlaq_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
-      // XTx[1][j] = -4*x[1][j] + -4*x[2][j] +  1*x[3][j] +  1*x[4][j];
-      XTx[1][j] = vmlsq_n_f32(vaddq_f32(x[3][j], x[4][j]), vaddq_f32(x[1][j], x[2][j]), 4.0f);
-
-      // XTx[2][j] =  4*x[1][j] + -4*x[2][j] + -1*x[3][j] +  1*x[4][j];
-      XTx[2][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[3][j]), vsubq_f32(x[1][j], x[2][j]), 4.0f);
-
-      // XTx[3][j] = -2*x[1][j] + -1*x[2][j] +  2*x[3][j] +  1*x[4][j];
-      XTx[3][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[2][j]), vsubq_f32(x[3][j], x[1][j]), 2.0f);
-
-      // XTx[4][j] =  2*x[1][j] + -1*x[2][j] + -2*x[3][j] +  1*x[4][j];
-      XTx[4][j] = vmlaq_n_f32(vsubq_f32(x[4][j], x[2][j]), vsubq_f32(x[1][j], x[3][j]), 2.0f);
-
-      // XTx[5][j] =  4*x[1][j] + -5*x[3][j] +  1*x[5][j];
-      XTx[5][j] = vmlsq_n_f32(vmlaq_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
-    }
-
-    // Compute U = XT . x . X
-    for (int i = 0; i < inner_tile_rows; i++)
-    {
-      // U[i][0] =  4*XTx[i][0] + -5*XTx[i][2] +  1*XTx[i][4];
-      U[i][0] = vmlsq_n_f32(vmlaq_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
-      // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] +  1*XTx[i][3] +  1*XTx[i][4];
-      U[i][1] = vmlsq_n_f32(vaddq_f32(XTx[i][3], XTx[i][4]), vaddq_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
-      // U[i][2] =  4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] +  1*XTx[i][4];
-      U[i][2] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][3]), vsubq_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
-      // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] +  2*XTx[i][3] +  1*XTx[i][4];
-      U[i][3] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][2]), vsubq_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
-      // U[i][4] =  2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] +  1*XTx[i][4];
-      U[i][4] = vmlaq_n_f32(vsubq_f32(XTx[i][4], XTx[i][2]), vsubq_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
-      // U[i][5] =  4*XTx[i][1] + -5*XTx[i][3] +  1*XTx[i][5];
-      U[i][5] = vmlsq_n_f32(vmlaq_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
-    }
-
-    // Store the transformed matrix
-    for (int i = 0, m = 0; i < inner_tile_rows; i++)
-    {
-      for (int j = 0; j < inner_tile_cols; j++, m++)
-      {
-        vst1q_f32(outptr + m*matrix_stride, U[i][j]);
-      }
-    }
-    outptr += 4;
-  }
-#endif  // __aarch64__
-#ifdef __arm_any__
-  for (; channels_remaining >= 2; channels_remaining -= 2)
-  {
-    // Matrices used/computed in this kernel
-    float32x2_t x[inner_tile_rows][inner_tile_cols];
-    float32x2_t XTx[inner_tile_rows][inner_tile_cols];
-    float32x2_t U[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
-    {
-      for (int j = 0; j < inner_tile_cols; j++)
-      {
-        x[i][j] = vdup_n_f32(0.0f);
-        XTx[i][j] = vdup_n_f32(0.0f);
-      }
-    }
-
-    // Read a 6x6 tile of the input
-    for (int i = pad_top; i < cells_i; i++)
-    {
-      for (int j = pad_left; j < cells_j; j++)
-      {
-        x[i][j] = vld1_f32(x_ptrs[i][j]);
-        x_ptrs[i][j] += 2;
-      }
-    }
-
-    // Compute XT . x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      // XTx[0][j] =  4*x[0][j] + -5*x[2][j] +  1*x[4][j];
-      XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
-
-      // XTx[1][j] = -4*x[1][j] + -4*x[2][j] +  1*x[3][j] +  1*x[4][j];
-      XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
-
-      // XTx[2][j] =  4*x[1][j] + -4*x[2][j] + -1*x[3][j] +  1*x[4][j];
-      XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
-
-      // XTx[3][j] = -2*x[1][j] + -1*x[2][j] +  2*x[3][j] +  1*x[4][j];
-      XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
-
-      // XTx[4][j] =  2*x[1][j] + -1*x[2][j] + -2*x[3][j] +  1*x[4][j];
-      XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
-
-      // XTx[5][j] =  4*x[1][j] + -5*x[3][j] +  1*x[5][j];
-      XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
-    }
-
-    // Compute U = XT . x . X
-    for (int i = 0; i < inner_tile_rows; i++)
-    {
-      // U[i][0] =  4*XTx[i][0] + -5*XTx[i][2] +  1*XTx[i][4];
-      U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
-
-      // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] +  1*XTx[i][3] +  1*XTx[i][4];
-      U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
-      // U[i][2] =  4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] +  1*XTx[i][4];
-      U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
-
-      // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] +  2*XTx[i][3] +  1*XTx[i][4];
-      U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
-
-      // U[i][4] =  2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] +  1*XTx[i][4];
-      U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
-
-      // U[i][5] =  4*XTx[i][1] + -5*XTx[i][3] +  1*XTx[i][5];
-      U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
-    }
-
-    // Store the transformed matrix
-    for (int i = 0, m = 0; i < inner_tile_rows; i++)
-    {
-      for (int j = 0; j < inner_tile_cols; j++, m++)
-      {
-        vst1_f32(outptr + m*matrix_stride, U[i][j]);
-      }
-    }
-    outptr += 2;
-  }
-#endif  // __arm_any__
-  for (; channels_remaining; channels_remaining--)
-  {
-    // Load x
-    for (int i = pad_top; i < cells_i; i++)
-    {
-      for (int j = pad_left; j < cells_j; j++)
-      {
-        x[i][j] = *(x_ptrs[i][j]++);
-      }
-    }
-
-    // Compute XT . x
-    for (int j = pad_left; j < cells_j; j++)
-    {
-      XTx[0][j] =  4*x[0][j] + -5*x[2][j] +  1*x[4][j];
-      XTx[1][j] = -4*x[1][j] + -4*x[2][j] +  1*x[3][j] +  1*x[4][j];
-      XTx[2][j] =  4*x[1][j] + -4*x[2][j] + -1*x[3][j] +  1*x[4][j];
-      XTx[3][j] = -2*x[1][j] + -1*x[2][j] +  2*x[3][j] +  1*x[4][j];
-      XTx[4][j] =  2*x[1][j] + -1*x[2][j] + -2*x[3][j] +  1*x[4][j];
-      XTx[5][j] =  4*x[1][j] + -5*x[3][j] +  1*x[5][j];
-    }
-
-    // Compute U = XT . x . X
-    for (int i = 0; i < inner_tile_rows; i++)
-    {
-      U[i][0] =  4*XTx[i][0] + -5*XTx[i][2] +  1*XTx[i][4];
-      U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] +  1*XTx[i][3] +  1*XTx[i][4];
-      U[i][2] =  4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] +  1*XTx[i][4];
-      U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] +  2*XTx[i][3] +  1*XTx[i][4];
-      U[i][4] =  2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] +  1*XTx[i][4];
-      U[i][5] =  4*XTx[i][1] + -5*XTx[i][3] +  1*XTx[i][5];
-    }
-
-    // Store the transformed matrix
-    for (int i = 0, m = 0; i < inner_tile_rows; i++)
-    {
-      for (int j = 0; j < inner_tile_cols; j++, m++)
-      {
-        *(outptr + m*matrix_stride) = U[i][j];
-      }
-    }
-    outptr++;
-  }
-}
-}  // namespace (anonymous)
-
-namespace winograd
-{
-template <int k>
-using Tiles = InputTransformImplTiles<k, k, 6, 6, float>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_generic = winograd_input_transform_6x6_fp32_process_tile<false>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_unpadded = winograd_input_transform_6x6_fp32_process_tile<true>;
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_top_padded[n_pad_top] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 1, 0, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_left_padded[n_pad_left] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 1, 0, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_bottom_padded[n_pad_bottom] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 1, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 2, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 3, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 4, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 5, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 6, 0>,
-};
-
-template <>
-const Tiles<3>::TileFn Tiles<3>::tilefn_right_padded[n_pad_right] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 1>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 2>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 3>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 4>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 5>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 6>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_generic = winograd_input_transform_6x6_fp32_process_tile<false>;
-
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_unpadded = winograd_input_transform_6x6_fp32_process_tile<true>;
-
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_top_padded[n_pad_top] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 2, 0, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_left_padded[n_pad_left] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 2, 0, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_bottom_padded[n_pad_bottom] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 1, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 2, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 3, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 4, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 5, 0>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 6, 0>,
-};
-
-template <>
-const Tiles<5>::TileFn Tiles<5>::tilefn_right_padded[n_pad_right] = {
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 1>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 2>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 3>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 4>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 5>,
-  winograd_input_transform_6x6_fp32_process_tile<true, 0, 0, 0, 6>,
-};
-
-template class InputTransform<3, 3, 6, 6, float>;
-template class InputTransform<5, 5, 6, 6, float>;
-}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp
deleted file mode 100644
index 597b074..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_2x2_3x3_fp32_process_tile(
-  const int n_channels,
-  const float* const matrix_base,
-  const int matrix_stride,
-  const float* const biases,
-  float* const output,
-  const int output_row_stride,
-  const int output_col_stride,
-  const int _pad_bottom,
-  const int _pad_right
-)
-{
-  constexpr int OutputTileRows = 2, OutputTileCols = 2;
-  const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
-  const int pad_right = Specialized ? PadRight : _pad_right;
-
-  const int cells_i = OutputTileRows - pad_bottom;
-  const int cells_j = OutputTileCols - pad_right;
-
-  // Construct a map to the output cells
-  float *outptrs[OutputTileRows][OutputTileCols];
-  for (int i = 0; i < cells_i; i++)
-  {
-    for (int j = 0; j < cells_j; j++)
-    {
-      outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
-    }
-  }
-  const float *inptr = matrix_base;
-  const float *bptr = biases;
-
-  if (bptr)
-  {
-    // For each channel of the output
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    for (; channels_remaining >= 4; channels_remaining -= 4)
-    {
-      // Matrices used and computed during this transform
-      float32x4_t F[4][4], FZ[4][2], f[2][2], b;
-
-      // Read a 4x4 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 4; i++)
-      {
-        for (int j = 0; j < 4; j++, m++)
-        {
-          F[i][j] = vld1q_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 4;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 4; i++)
-      {
-        // FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
-        FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
-        // FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
-        FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
-        f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
-        // f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
-        f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
-      }
-
-      // Load the bias vector
-      b = vld1q_f32(bptr);
-      bptr += 4;
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
-          outptrs[i][j] += 4;
-        }
-      }
-    }
-#endif  // __aarch64__
-#ifdef __arm_any__
-    for (; channels_remaining >= 2; channels_remaining -= 2)
-    {
-      // Matrices used and computed during this transform
-      float32x2_t F[4][4], FZ[4][2], f[2][2], b;
-
-      // Read a 4x4 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 4; i++)
-      {
-        for (int j = 0; j < 4; j++, m++)
-        {
-          F[i][j] = vld1_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 2;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 4; i++)
-      {
-        // FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
-        FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
-        // FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
-        FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
-        f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
-        // f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
-        f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
-      }
-
-      // Load the bias vector
-      b = vld1_f32(bptr);
-      bptr += 2;
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
-          outptrs[i][j] += 2;
-        }
-      }
-    }
-#endif  // __arm_any__
-    for (; channels_remaining; channels_remaining--)
-    {
-      // Matrices used and computed during this transform
-      float F[4][4], FZ[4][2], f[2][2], b;
-
-      // Read a 4x4 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 4; i++)
-      {
-        for (int j = 0; j < 4; j++, m++)
-        {
-          F[i][j] = *(inptr + m*matrix_stride);
-        }
-      }
-      inptr++;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 4; i++)
-      {
-        FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
-        FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
-        f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
-      }
-
-      // Load the bias
-      b = *(bptr++);
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          *(outptrs[i][j]++) = f[i][j] + b;
-        }
-      }
-    }
-  }
-  else
-  {
-    // For each channel of the output
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    for (; channels_remaining >= 4; channels_remaining -= 4)
-    {
-      // Matrices used and computed during this transform
-      float32x4_t F[4][4], FZ[4][2], f[2][2];
-
-      // Read a 4x4 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 4; i++)
-      {
-        for (int j = 0; j < 4; j++, m++)
-        {
-          F[i][j] = vld1q_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 4;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 4; i++)
-      {
-        // FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
-        FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
-
-        // FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
-        FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
-        f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
-        // f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
-        f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1q_f32(outptrs[i][j], f[i][j]);
-          outptrs[i][j] += 4;
-        }
-      }
-    }
-#endif  // __aarch64__
-#ifdef __arm_any__
-    for (; channels_remaining >= 2; channels_remaining -= 2)
-    {
-      // Matrices used and computed during this transform
-      float32x2_t F[4][4], FZ[4][2], f[2][2];
-
-      // Read a 4x4 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 4; i++)
-      {
-        for (int j = 0; j < 4; j++, m++)
-        {
-          F[i][j] = vld1_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 2;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 4; i++)
-      {
-        // FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
-        FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
-
-        // FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
-        FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
-        f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
-
-        // f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
-        f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1_f32(outptrs[i][j], f[i][j]);
-          outptrs[i][j] += 2;
-        }
-      }
-    }
-#endif  // __arm_any__
-    for (; channels_remaining; channels_remaining--)
-    {
-      // Matrices used and computed during this transform
-      float F[4][4], FZ[4][2], f[2][2];
-
-      // Read a 4x4 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 4; i++)
-      {
-        for (int j = 0; j < 4; j++, m++)
-        {
-          F[i][j] = *(inptr + m*matrix_stride);
-        }
-      }
-      inptr++;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 4; i++)
-      {
-        FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
-        FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
-        f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          *(outptrs[i][j]++) = f[i][j];
-        }
-      }
-    }
-  }
-}
-
-}  // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<3, 3, 4, 4, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_2x2_3x3_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2x2_3x3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
-  winograd_output_transform_2x2_3x3_fp32_process_tile<true, 1, 0>
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_output_transform_2x2_3x3_fp32_process_tile<true, 0, 1>
-};
-
-template class OutputTransform<3, 3, 4, 4, float>;
-}  // namespace winograd
-
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp
deleted file mode 100644
index 60d7181..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2x2_5x5_fp32.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_2x2_5x5_fp32_process_tile(
-  const int n_channels,
-  const float* const matrix_base,
-  const int matrix_stride,
-  const float* const biases,
-  float* const output,
-  const int output_row_stride,
-  const int output_col_stride,
-  const int _pad_bottom,
-  const int _pad_right
-)
-{
-  constexpr int OutputTileRows = 2, OutputTileCols = 2;
-  const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
-  const int pad_right = Specialized ? PadRight : _pad_right;
-
-  const int cells_i = OutputTileRows - pad_bottom;
-  const int cells_j = OutputTileCols - pad_right;
-
-  // Construct a map to the output cells
-  float *outptrs[OutputTileRows][OutputTileCols];
-  for (int i = 0; i < cells_i; i++)
-  {
-    for (int j = 0; j < cells_j; j++)
-    {
-      outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
-    }
-  }
-  const float *inptr = matrix_base;
-  const float *bptr = biases;
-
-  if (bptr)
-  {
-    // For each channel of the output
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    for (; channels_remaining >= 4; channels_remaining -= 4)
-    {
-      // Matrices used and computed during this transform
-      float32x4_t F[6][6], FZ[6][2], f[2][2], b;
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1q_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 4;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
-        FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =               1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
-        f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      b = vld1q_f32(bptr);
-      bptr += 4;
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
-          outptrs[i][j] += 4;
-        }
-      }
-    }
-#endif  // __aarch64__
-#ifdef __arm_any__
-    for (; channels_remaining >= 2; channels_remaining -= 2)
-    {
-      // Matrices used and computed during this transform
-      float32x2_t F[6][6], FZ[6][2], f[2][2], b;
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 2;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
-        FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =               1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
-        f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      b = vld1_f32(bptr);
-      bptr += 2;
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
-          outptrs[i][j] += 2;
-        }
-      }
-    }
-#endif  // __arm_any__
-    for (; channels_remaining; channels_remaining--)
-    {
-      // Matrices used and computed during this transform
-      float F[6][6], FZ[6][2], f[2][2], b;
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = *(inptr + m*matrix_stride);
-        }
-      }
-      inptr++;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[1][j] =                1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
-      }
-
-      // Write out the output tile
-      b = *(bptr++);
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          *(outptrs[i][j]++) = f[i][j] + b;
-        }
-      }
-    }
-  }
-  else
-  {
-    // For each channel of the output
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    for (; channels_remaining >= 4; channels_remaining -= 4)
-    {
-      // Matrices used and computed during this transform
-      float32x4_t F[6][6], FZ[6][2], f[2][2];
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1q_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 4;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
-        FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =               1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
-        f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1q_f32(outptrs[i][j], f[i][j]);
-          outptrs[i][j] += 4;
-        }
-      }
-    }
-#endif  // __aarch64__
-#ifdef __arm_any__
-    for (; channels_remaining >= 2; channels_remaining -= 2)
-    {
-      // Matrices used and computed during this transform
-      float32x2_t F[6][6], FZ[6][2], f[2][2];
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 2;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
-        FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =               1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
-        f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1_f32(outptrs[i][j], f[i][j]);
-          outptrs[i][j] += 2;
-        }
-      }
-    }
-#endif  // __arm_any__
-    for (; channels_remaining; channels_remaining--)
-    {
-      // Matrices used and computed during this transform
-      float F[6][6], FZ[6][2], f[2][2];
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = *(inptr + m*matrix_stride);
-        }
-      }
-      inptr++;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 2; j++)
-      {
-        f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[1][j] =                1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          *(outptrs[i][j]++) = f[i][j];
-        }
-      }
-    }
-  }
-}
-
-}  // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<5, 5, 6, 6, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_2x2_5x5_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2x2_5x5_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
-  winograd_output_transform_2x2_5x5_fp32_process_tile<true, 1, 0>
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_output_transform_2x2_5x5_fp32_process_tile<true, 0, 1>
-};
-
-template class OutputTransform<5, 5, 6, 6, float>;
-}  // namespace winograd
-
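
The same pattern, for F(2x2, 5x5): a 6x6 Winograd-domain tile collapses to a 2x2 output tile through f = Z^T F Z, with the bias (when present) added on the way out. A single-channel sketch read off the scalar tail above (illustrative, not part of this patch):

// Z^T = [ 1  1  1  1  1  0 ]
//       [ 0  1 -1  2 -2  1 ]
void output_transform_2x2_5x5_ref(const float F[6][6], float f[2][2])
{
  float FZ[6][2];

  // FZ = F Z
  for (int i = 0; i < 6; i++)
  {
    FZ[i][0] = F[i][0] + F[i][1] + F[i][2] + F[i][3] + F[i][4];
    FZ[i][1] = F[i][1] - F[i][2] + 2*F[i][3] - 2*F[i][4] + F[i][5];
  }

  // f = Z^T (F Z)
  for (int j = 0; j < 2; j++)
  {
    f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j] + FZ[3][j] + FZ[4][j];
    f[1][j] = FZ[1][j] - FZ[2][j] + 2*FZ[3][j] - 2*FZ[4][j] + FZ[5][j];
  }
}
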
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp
deleted file mode 100644
index 15cc04b..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_4x4_3x3_fp32.cpp
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-
-namespace
-{
-
-template <bool Specialized, int PadBottom=0, int PadRight=0>
-void winograd_output_transform_4x4_3x3_fp32_process_tile(
-  const int n_channels,
-  const float* const matrix_base,
-  const int matrix_stride,
-  const float* const biases,
-  float* const output,
-  const int output_row_stride,
-  const int output_col_stride,
-  const int _pad_bottom,
-  const int _pad_right
-)
-{
-  const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
-  const int pad_right = Specialized ? PadRight : _pad_right;
-  constexpr int TileRows = 4, TileCols = 4;
-
-  const int cells_i = TileRows - pad_bottom;
-  const int cells_j = TileCols - pad_right;
-
-  // Construct a map to the output cells
-  float *outptrs[TileRows][TileCols];
-  for (int i = 0; i < cells_i; i++)
-  {
-    for (int j = 0; j < cells_j; j++)
-    {
-      outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
-    }
-  }
-  const float *inptr = matrix_base;
-  const float *bptr = biases;
-
-  if (bptr)
-  {
-    // For each channel of the output
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    for (; channels_remaining >= 4; channels_remaining -= 4)
-    {
-      // Matrices used and computed during this transform
-      float32x4_t F[6][6], FZ[6][4], f[4][4], b;
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1q_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 4;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
-        FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
-        // FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
-        FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
-        // FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
-        FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 4; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
-        f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
-        // f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
-        f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
-        // f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
-        f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      b = vld1q_f32(bptr);
-      bptr += 4;
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
-          outptrs[i][j] += 4;
-        }
-      }
-    }
-#endif  // __aarch64__
-#ifdef __arm_any__
-    for (; channels_remaining >= 2; channels_remaining -= 2)
-    {
-      // Matrices used and computed during this transform
-      float32x2_t F[6][6], FZ[6][4], f[4][4], b;
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 2;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
-        FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
-        // FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
-        FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
-        // FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
-        FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 4; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
-        f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
-        // f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
-        f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
-        // f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
-        f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      b = vld1_f32(bptr);
-      bptr += 2;
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
-          outptrs[i][j] += 2;
-        }
-      }
-    }
-#endif  // __arm_any__
-    for (; channels_remaining; channels_remaining--)
-    {
-      // Matrices used and computed during this transform
-      float F[6][6], FZ[6][4], f[4][4], b;
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = *(inptr + m*matrix_stride);
-        }
-      }
-      inptr++;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
-        FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
-        FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 4; j++)
-      {
-        f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
-        f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
-        f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
-      }
-
-      // Write out the output tile
-      b = *(bptr++);
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          *(outptrs[i][j]++) = f[i][j] + b;
-        }
-      }
-    }
-  }
-  else
-  {
-    // For each channel of the output
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    for (; channels_remaining >= 4; channels_remaining -= 4)
-    {
-      // Matrices used and computed during this transform
-      float32x4_t F[6][6], FZ[6][4], f[4][4];
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1q_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 4;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
-        FZ[i][1] = vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f);
-
-        // FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
-        FZ[i][2] = vmlaq_n_f32(vaddq_f32(F[i][1], F[i][2]), vaddq_f32(F[i][3], F[i][4]), 4.0f);
-
-        // FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
-        FZ[i][3] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 4; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
-        f[1][j] = vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
-        // f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
-        f[2][j] = vmlaq_n_f32(vaddq_f32(FZ[1][j], FZ[2][j]), vaddq_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
-        // f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
-        f[3][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1q_f32(outptrs[i][j], f[i][j]);
-          outptrs[i][j] += 4;
-        }
-      }
-    }
-#endif  // __aarch64__
-#ifdef __arm_any__
-    for (; channels_remaining >= 2; channels_remaining -= 2)
-    {
-      // Matrices used and computed during this transform
-      float32x2_t F[6][6], FZ[6][4], f[4][4];
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = vld1_f32(inptr + m*matrix_stride);
-        }
-      }
-      inptr += 2;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
-
-        // FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
-        FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
-
-        // FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
-        FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
-
-        // FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
-        FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 4; j++)
-      {
-        // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
-
-        // f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
-        f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
-
-        // f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
-        f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
-
-        // f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
-        f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          vst1_f32(outptrs[i][j], f[i][j]);
-          outptrs[i][j] += 2;
-        }
-      }
-    }
-#endif  // __arm_any__
-    for (; channels_remaining; channels_remaining--)
-    {
-      // Matrices used and computed during this transform
-      float F[6][6], FZ[6][4], f[4][4];
-
-      // Read a 6x6 tile in the Winograd domain
-      for (int i = 0, m = 0; i < 6; i++)
-      {
-        for (int j = 0; j < 6; j++, m++)
-        {
-          F[i][j] = *(inptr + m*matrix_stride);
-        }
-      }
-      inptr++;
-
-      // Compute the matrix F Z
-      for (int i = 0; i < 6; i++)
-      {
-        FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
-        FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
-        FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
-        FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
-      }
-
-      // Compute the output tile f = ZT F Z
-      for (int j = 0; j < 4; j++)
-      {
-        f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
-        f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
-        f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
-        f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
-      }
-
-      // Write out the output tile
-      for (int i = 0; i < cells_i; i++)
-      {
-        for (int j = 0; j < cells_j; j++)
-        {
-          *(outptrs[i][j]++) = f[i][j];
-        }
-      }
-    }
-  }
-}
-
-}  // namespace (anonymous)
-
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<3, 3, 6, 6, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_generic = winograd_output_transform_4x4_3x3_fp32_process_tile<false>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_4x4_3x3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_bottom_padded[n_pad_bottom] = {
-  winograd_output_transform_4x4_3x3_fp32_process_tile<true, 1, 0>,
-  winograd_output_transform_4x4_3x3_fp32_process_tile<true, 2, 0>,
-  winograd_output_transform_4x4_3x3_fp32_process_tile<true, 3, 0>,
-};
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 1>,
-  winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 2>,
-  winograd_output_transform_4x4_3x3_fp32_process_tile<true, 0, 3>,
-};
-
-template class OutputTransform<3, 3, 6, 6, float>;
-}  // namespace winograd
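
All three deleted output-transform files share the same dispatch machinery: tilefn_unpadded and the tilefn_bottom_padded/tilefn_right_padded tables point at instantiations with Specialized = true, so the padding amounts become compile-time constants, cells_i and cells_j fold, and the compiler can fully unroll the tile-writing loops; tilefn_generic keeps the pads as runtime arguments for the remaining cases. A stripped-down sketch of the idea (illustrative names, not code from this patch):

// With Specialized = true the loop bound is a compile-time constant;
// with Specialized = false it stays a runtime value.
template <bool Specialized, int PadBottom = 0>
void process_tile(const int _pad_bottom)
{
  const int pad_bottom = Specialized ? PadBottom : _pad_bottom;
  const int cells_i = 4 - pad_bottom;  // rows actually written
  for (int i = 0; i < cells_i; i++)
  {
    // ... write row i of the output tile ...
  }
}

// One fully specialised instantiation per legal padding amount, mirroring
// tilefn_bottom_padded above (three entries for a 4x4 output tile, one for
// the 2x2 tiles).
using TileFn = void (*)(const int);
const TileFn bottom_padded[3] = {
  process_tile<true, 1>, process_tile<true, 2>, process_tile<true, 3>,
};
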
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
deleted file mode 100644
index 85cf418..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
-  template <>
-  template <>
-  void WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Get pointers to each cell of the weight tensor
-    const auto weight_col_stride = n_input_channels * n_output_channels;
-    const float *inptrs[kernel_cols];
-    for (int j = 0; j < kernel_cols; j++)
-    {
-      inptrs[j] = input + j*weight_col_stride;
-    }
-
-    // For each input channel
-    for (int ic = 0; ic < n_input_channels; ic++)
-    {
-      float *outptr = output + ic * matrix_row_stride;
-
-      // For each output channel
-      int channels_remaining = n_output_channels;
-      for (; channels_remaining; channels_remaining--)
-      {
-        // Matrices used and computed in this kernel
-        float w[kernel_cols], V[inner_tile_cols];
-
-        // Read weights
-        for (int j = 0; j < kernel_cols; j++)
-        {
-          w[j] = *(inptrs[j]++);
-        }
-
-        // Compute V = w WT
-        V[0] = (w[0]*-1) / 36.0f;
-        V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
-        V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
-        V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
-        V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
-        V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
-        V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
-        V[7] = (w[6]*1) / 1.0f;
-
-        // Store the transformed weights
-        for (int j = 0; j < inner_tile_cols; j++)
-        {
-          *(outptr + j*matrix_stride) = V[j];
-        }
-        outptr++;
-      }
-    }
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template <>
-  template <>
-  void WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Redirect to the 1xN implementation
-    WinogradGEMM<1, 2, 1, 7>::template WeightsTransform<float>::execute(
-      n_output_channels, n_input_channels, input, output, matrix_stride,
-      matrix_row_stride
-    );
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template struct WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>;
-  template struct WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>;
-}
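
The 1x7 kernel above transforms into an 8-wide inner tile: a Winograd F(m, r) scheme producing m outputs from an r-tap kernel needs m + r - 1 sample points, which is why V has entries V[0..7] here and again in the 1x5/4-output variant further below. A compile-time statement of that relation (illustrative, not part of this patch):

// F(m, r) needs m + r - 1 points in the transform domain.
constexpr int winograd_inner_tile(int m, int r) { return m + r - 1; }
static_assert(winograd_inner_tile(2, 7) == 8, "F(2,7) fills V[0..7]");
static_assert(winograd_inner_tile(4, 5) == 8, "F(4,5) fills V[0..7]");
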
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp
deleted file mode 100644
index 6c71461..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_3x3_fp32.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
-  template <>
-  template <>
-  void WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    constexpr int inner_tile_i = 4;
-    constexpr int inner_tile_j = 4;
-
-    // Get pointers to each cell of the weight tensor
-    const auto weight_col_stride = n_input_channels * n_output_channels;
-    const auto weight_row_stride = 3 * weight_col_stride;
-    const float *inptrs[3][3];
-    for (int i = 0; i < 3; i++)
-    {
-      for (int j = 0; j < 3; j++)
-      {
-        inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
-      }
-    }
-
-    // For each input channel
-    for (int ic = 0; ic < n_input_channels; ic++)
-    {
-      float *outptr = output + ic * matrix_row_stride;
-
-      // For each output channel
-      int channels_remaining = n_output_channels;
-#ifdef __aarch64__
-      for (; channels_remaining >= 4; channels_remaining -= 4)
-      {
-        // Matrices used and computed in this kernel
-        float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
-        // Read weights
-        for (int i = 0; i < 3; i++)
-        {
-          for (int j = 0; j < 3; j++)
-          {
-            w[i][j] = vld1q_f32(inptrs[i][j]);
-            inptrs[i][j] += 4;
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 3; j++)
-        {
-          Ww[0][j] = w[0][j];
-
-          // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
-          Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
-          // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
-          Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
-          Ww[3][j] = w[2][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < inner_tile_i; i++)
-        {
-          V[i][0] = Ww[i][0];
-
-          // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
-          V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
-          // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
-          V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
-          V[i][3] = Ww[i][2];
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < inner_tile_i; i++)
-        {
-          for (int j = 0; j < inner_tile_j; j++, m++)
-          {
-            vst1q_f32(outptr + m*matrix_stride, V[i][j]);
-          }
-        }
-        outptr += 4;
-      }
-#endif  // __aarch64__
-#ifdef __arm_any__
-      for (; channels_remaining >= 2; channels_remaining -= 2)
-      {
-        // Matrices used and computed in this kernel
-        float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
-        // Read weights
-        for (int i = 0; i < 3; i++)
-        {
-          for (int j = 0; j < 3; j++)
-          {
-            w[i][j] = vld1_f32(inptrs[i][j]);
-            inptrs[i][j] += 2;
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 3; j++)
-        {
-          Ww[0][j] = w[0][j];
-
-          // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
-          Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
-          // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
-          Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
-
-          Ww[3][j] = w[2][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < inner_tile_i; i++)
-        {
-          V[i][0] = Ww[i][0];
-
-          // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
-          V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
-          // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
-          V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
-
-          V[i][3] = Ww[i][2];
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < inner_tile_i; i++)
-        {
-          for (int j = 0; j < inner_tile_j; j++, m++)
-          {
-            vst1_f32(outptr + m*matrix_stride, V[i][j]);
-          }
-        }
-        outptr += 2;
-      }
-#endif  // __arm_any__
-      for (; channels_remaining; channels_remaining--)
-      {
-        // Matrices used and computed in this kernel
-        float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
-
-        // Read weights
-        for (int i = 0; i < 3; i++)
-        {
-          for (int j = 0; j < 3; j++)
-          {
-            w[i][j] = *(inptrs[i][j]++);
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 3; j++)
-        {
-          Ww[0][j] = w[0][j];
-          Ww[1][j] = 0.5f*(w[0][j] + w[1][j] + w[2][j]);
-          Ww[2][j] = 0.5f*(w[0][j] - w[1][j] + w[2][j]);
-          Ww[3][j] = w[2][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < inner_tile_i; i++)
-        {
-          V[i][0] = Ww[i][0];
-          V[i][1] = 0.5f*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
-          V[i][2] = 0.5f*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
-          V[i][3] = Ww[i][2];
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < inner_tile_i; i++)
-        {
-          for (int j = 0; j < inner_tile_j; j++, m++)
-          {
-            *(outptr + m*matrix_stride) = V[i][j];
-          }
-        }
-        outptr++;
-      }
-    }
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    const int channel_prod = shape.n_input_channels * shape.n_output_channels;
-    return 2 * 18 * channel_prod;
-  }
-
-  template struct WinogradGEMM<2, 2, 3, 3>::WeightsTransform<float>;
-}  // namespace winograd
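
The file above implemented the F(2x2, 3x3) weights transform V = G w G^T, where G follows directly from the scalar tail:

  G = [   1     0     0
        1/2   1/2   1/2
        1/2  -1/2   1/2
          0     0     1 ]

A single-channel sketch (illustrative, not part of this patch):

void weights_transform_2x2_3x3_ref(const float w[3][3], float V[4][4])
{
  float Gw[4][3];

  // Gw = G w
  for (int j = 0; j < 3; j++)
  {
    Gw[0][j] = w[0][j];
    Gw[1][j] = 0.5f*(w[0][j] + w[1][j] + w[2][j]);
    Gw[2][j] = 0.5f*(w[0][j] - w[1][j] + w[2][j]);
    Gw[3][j] = w[2][j];
  }

  // V = (G w) G^T
  for (int i = 0; i < 4; i++)
  {
    V[i][0] = Gw[i][0];
    V[i][1] = 0.5f*(Gw[i][0] + Gw[i][1] + Gw[i][2]);
    V[i][2] = 0.5f*(Gw[i][0] - Gw[i][1] + Gw[i][2]);
    V[i][3] = Gw[i][2];
  }
}
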
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp
deleted file mode 100644
index 2f4f6e1..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2x2_5x5_fp32.cpp
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
-  template <>
-  template <>
-  void WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Get pointers to each cell of the weight tensor
-    const auto weight_col_stride = n_input_channels * n_output_channels;
-    const auto weight_row_stride = 5 * weight_col_stride;
-    const float *inptrs[5][5];
-    for (int i = 0; i < 5; i++)
-    {
-      for (int j = 0; j < 5; j++)
-      {
-        inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
-      }
-    }
-
-    // For each input channel
-    for (int ic = 0; ic < n_input_channels; ic++)
-    {
-      float *outptr = output + ic * matrix_row_stride;
-
-      // For each output channel
-      int channels_remaining = n_output_channels;
-#ifdef __aarch64__
-      for (; channels_remaining >= 4; channels_remaining -= 4)
-      {
-        // Matrices used and computed in this kernel
-        float32x4_t w[5][5], Ww[6][5], V[6][6];
-
-        // Read weights
-        for (int i = 0; i < 5; i++)
-        {
-          for (int j = 0; j < 5; j++)
-          {
-            w[i][j] = vld1q_f32(inptrs[i][j]);
-            inptrs[i][j] += 4;
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 5; j++)
-        {
-          // Ww[0][j] = w[0][j]/4.0f;
-          Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);
-
-          // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
-          Ww[1][j] = vmulq_n_f32(
-            vaddq_f32(
-              vaddq_f32(
-                vaddq_f32(w[1][j], w[0][j]),
-                vaddq_f32(w[3][j], w[2][j])
-              ),
-              w[4][j]
-            ),
-            -1.0f/6.0f
-          );
-
-          // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
-          // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
-          Ww[2][j] = vmulq_n_f32(
-            vsubq_f32(
-              vaddq_f32(
-                vsubq_f32(w[1][j], w[0][j]),
-                vsubq_f32(w[3][j], w[2][j])
-              ),
-              w[4][j]
-            ),
-            1.0f/6.0f
-          );
-
-          // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
-          Ww[3][j] = vmulq_n_f32(
-            vmlaq_n_f32(
-              vaddq_f32(
-                vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
-                vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
-              ),
-              w[4][j], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
-          Ww[4][j] = vmulq_n_f32(
-            vmlaq_n_f32(
-              vaddq_f32(
-                vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
-                vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
-              ),
-              w[4][j], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // Ww[5][j] = w[4][j];
-          Ww[5][j] = w[4][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < 6; i++)
-        {
-          // V[i][0] = Ww[i][0]/4.0f;
-          V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);
-
-          // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
-          V[i][1] = vmulq_n_f32(
-            vaddq_f32(
-              vaddq_f32(
-                vaddq_f32(Ww[i][1], Ww[i][0]),
-                vaddq_f32(Ww[i][3], Ww[i][2])
-              ),
-              Ww[i][4]
-            ),
-            -1.0f/6.0f
-          );
-
-          // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
-          // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
-          V[i][2] = vmulq_n_f32(
-            vsubq_f32(
-              vaddq_f32(
-                vsubq_f32(Ww[i][1], Ww[i][0]),
-                vsubq_f32(Ww[i][3], Ww[i][2])
-              ),
-              Ww[i][4]
-            ),
-            1.0f/6.0f
-          );
-
-          // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
-          V[i][3] = vmulq_n_f32(
-            vmlaq_n_f32(
-              vaddq_f32(
-                vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
-                vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
-              ),
-              Ww[i][4], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
-          V[i][4] = vmulq_n_f32(
-            vmlaq_n_f32(
-              vaddq_f32(
-                vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
-                vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
-              ),
-              Ww[i][4], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // V[i][5] = Ww[i][4];
-          V[i][5] = Ww[i][4];
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < 6; i++)
-        {
-          for (int j = 0; j < 6; j++, m++)
-          {
-            vst1q_f32(outptr + m*matrix_stride, V[i][j]);
-          }
-        }
-        outptr += 4;
-      }
-#endif  // __aarch64__
-#ifdef __arm_any__
-      for (; channels_remaining >= 2; channels_remaining -= 2)
-      {
-        // Matrices used and computed in this kernel
-        float32x2_t w[5][5], Ww[6][5], V[6][6];
-
-        // Read weights
-        for (int i = 0; i < 5; i++)
-        {
-          for (int j = 0; j < 5; j++)
-          {
-            w[i][j] = vld1_f32(inptrs[i][j]);
-            inptrs[i][j] += 2;
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 5; j++)
-        {
-          // Ww[0][j] = w[0][j]/4.0f;
-          Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);
-
-          // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
-          Ww[1][j] = vmul_n_f32(
-            vadd_f32(
-              vadd_f32(
-                vadd_f32(w[1][j], w[0][j]),
-                vadd_f32(w[3][j], w[2][j])
-              ),
-              w[4][j]
-            ),
-            -1.0f/6.0f
-          );
-
-          // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
-          // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
-          Ww[2][j] = vmul_n_f32(
-            vsub_f32(
-              vadd_f32(
-                vsub_f32(w[1][j], w[0][j]),
-                vsub_f32(w[3][j], w[2][j])
-              ),
-              w[4][j]
-            ),
-            1.0f/6.0f
-          );
-
-          // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
-          Ww[3][j] = vmul_n_f32(
-            vmla_n_f32(
-              vadd_f32(
-                vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
-                vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
-              ),
-              w[4][j], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
-          Ww[4][j] = vmul_n_f32(
-            vmla_n_f32(
-              vadd_f32(
-                vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
-                vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
-              ),
-              w[4][j], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // Ww[5][j] = w[4][j];
-          Ww[5][j] = w[4][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < 6; i++)
-        {
-          // V[i][0] = Ww[i][0]/4.0f;
-          V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);
-
-          // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
-          V[i][1] = vmul_n_f32(
-            vadd_f32(
-              vadd_f32(
-                vadd_f32(Ww[i][1], Ww[i][0]),
-                vadd_f32(Ww[i][3], Ww[i][2])
-              ),
-              Ww[i][4]
-            ),
-            -1.0f/6.0f
-          );
-
-          // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
-          // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
-          V[i][2] = vmul_n_f32(
-            vsub_f32(
-              vadd_f32(
-                vsub_f32(Ww[i][1], Ww[i][0]),
-                vsub_f32(Ww[i][3], Ww[i][2])
-              ),
-              Ww[i][4]
-            ),
-            1.0f/6.0f
-          );
-
-          // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
-          V[i][3] = vmul_n_f32(
-            vmla_n_f32(
-              vadd_f32(
-                vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
-                vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
-              ),
-              Ww[i][4], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
-          V[i][4] = vmul_n_f32(
-            vmla_n_f32(
-              vadd_f32(
-                vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
-                vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
-              ),
-              Ww[i][4], 2.0f
-            ),
-            1.0f/3.0f
-          );
-
-          // V[i][5] = Ww[i][4];
-          V[i][5] = Ww[i][4];
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < 6; i++)
-        {
-          for (int j = 0; j < 6; j++, m++)
-          {
-            vst1_f32(outptr + m*matrix_stride, V[i][j]);
-          }
-        }
-        outptr += 2;
-      }
-#endif  // __arm_any__
-      for (; channels_remaining; channels_remaining--)
-      {
-        // Matrices used and computed in this kernel
-        float w[5][5], Ww[6][5], V[6][6];
-
-        // Read weights
-        for (int i = 0; i < 5; i++)
-        {
-          for (int j = 0; j < 5; j++)
-          {
-            w[i][j] = *(inptrs[i][j]++);
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 5; j++)
-        {
-          Ww[0][j] = w[0][j]/4.0f;
-          Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
-          Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
-          Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
-          Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
-          Ww[5][j] = w[4][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < 6; i++)
-        {
-          V[i][0] = Ww[i][0]/4.0f;
-          V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
-          V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
-          V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
-          V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
-          V[i][5] = Ww[i][4];
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < 6; i++)
-        {
-          for (int j = 0; j < 6; j++, m++)
-          {
-            *(outptr + m*matrix_stride) = V[i][j];
-          }
-        }
-        outptr++;
-      }
-    }
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template struct WinogradGEMM<2, 2, 5, 5>::WeightsTransform<float>;
-}  // namespace winograd
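
Likewise, the F(2x2, 5x5) weights transform above is V = G w G^T applied separably, with the rows of G readable from the scalar tail. A helper applying G to one 5-tap kernel column (illustrative, not part of this patch):

#include <array>

// Maps one 5-element kernel column to the 6 inner-tile values, matching the
// scalar Ww[0..5] expressions above.
std::array<float, 6> apply_G_2x2_5x5(const std::array<float, 5> &w)
{
  return {
    w[0] / 4.0f,
    -(w[0] + w[1] + w[2] + w[3] + w[4]) / 6.0f,
    (-w[0] + w[1] - w[2] + w[3] - w[4]) / 6.0f,
    (w[0]/8.0f + w[1]/4.0f + w[2]/2.0f + w[3] + 2*w[4]) / 3.0f,
    (w[0]/8.0f - w[1]/4.0f + w[2]/2.0f - w[3] + 2*w[4]) / 3.0f,
    w[4]
  };
}
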
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
deleted file mode 100644
index 2f14e20..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
-  template <>
-  template <>
-  void WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Get pointers to each cell of the weight tensor
-    const auto weight_col_stride = n_input_channels * n_output_channels;
-    const float *inptrs[kernel_cols];
-    for (int j = 0; j < kernel_cols; j++)
-    {
-      inptrs[j] = input + j*weight_col_stride;
-    }
-
-    // For each input channel
-    for (int ic = 0; ic < n_input_channels; ic++)
-    {
-      float *outptr = output + ic * matrix_row_stride;
-
-      // For each output channel
-      int channels_remaining = n_output_channels;
-      for (; channels_remaining; channels_remaining--)
-      {
-        // Matrices used and computed in this kernel
-        float w[kernel_cols], V[inner_tile_cols];
-
-        // Read weights
-        for (int j = 0; j < kernel_cols; j++)
-        {
-          w[j] = *(inptrs[j]++);
-        }
-
-        // Compute V = w WT
-        V[0] = (w[0]*-1) / 36;
-        V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48;
-        V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48;
-        V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120;
-        V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120;
-        V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720;
-        V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720;
-        V[7] = (w[4]*1) / 1;
-
-        // Store the transformed weights
-        for (int j = 0; j < inner_tile_cols; j++)
-        {
-          *(outptr + j*matrix_stride) = V[j];
-        }
-        outptr++;
-      }
-    }
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template <>
-  template <>
-  void WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Redirect to the 1xN implementation
-    WinogradGEMM<1, 4, 1, 5>::template WeightsTransform<float>::execute(
-      n_output_channels, n_input_channels, input, output, matrix_stride,
-      matrix_row_stride
-    );
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template struct WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>;
-  template struct WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>;
-}
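
A detail worth noting in the deleted 1x5 kernel (and its siblings): the inptrs[] cursors are never reset between input channels. That is safe because the tensor is HWIO, so for a fixed kernel cell the (input channel, output channel) plane is one contiguous run and the ic/oc loop nest walks it exactly once. A minimal standalone check of that reading (names introduced here, not part of the patch):

    #include <cassert>

    int main()
    {
      const int n_input_channels = 3, n_output_channels = 5;
      int cursor = 0;  // plays the role of *(inptrs[j]++) for one kernel cell
      for (int ic = 0; ic < n_input_channels; ic++)
      {
        for (int oc = 0; oc < n_output_channels; oc++)
        {
          assert(ic * n_output_channels + oc == cursor++);
        }
      }
      return 0;
    }
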
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp
deleted file mode 100644
index a56a475..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4x4_3x3_fp32.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-namespace winograd
-{
-  /* Float implementation for kernel transform F(4x4, 3x3) */
-  template <>
-  template <>
-  void WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Get pointers to each cell of the weight tensor
-    const auto weight_col_stride = n_input_channels * n_output_channels;
-    const auto weight_row_stride = 3 * weight_col_stride;
-    const float *inptrs[3][3];
-    for (int i = 0; i < 3; i++)
-    {
-      for (int j = 0; j < 3; j++)
-      {
-        inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
-      }
-    }
-
-    // For each input channel
-    for (int ic = 0; ic < n_input_channels; ic++)
-    {
-      float *outptr = output + ic * matrix_row_stride;
-
-      // For each output channel
-      int channels_remaining = n_output_channels;
-#ifdef __aarch64__
-      for (; channels_remaining >= 4; channels_remaining -= 4)
-      {
-        // Matrices used and computed in this kernel
-        float32x4_t w[3][3], Ww[6][3], V[6][6];
-
-        // Read weights
-        for (int i = 0; i < 3; i++)
-        {
-          for (int j = 0; j < 3; j++)
-          {
-            w[i][j] = vld1q_f32(inptrs[i][j]);
-            inptrs[i][j] += 4;
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 3; j++)
-        {
-          // Ww[0][j] =  6*w[0][j];
-          Ww[0][j] = vmulq_n_f32(w[0][j], 6.0);
-
-          // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
-          Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
-          // Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
-          Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
-          // Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
-          Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
-          // Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
-          Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
-          // Ww[5][j] = 24*w[2][j];
-          Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f);
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < 6; i++)
-        {
-          const float recip576 = 1.0f / 576.0f;
-
-          // V[i][0] =  6*Ww[i][0];
-          V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576);
-
-          // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
-          V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
-          // V[i][2] = -4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2];
-          V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
-          // V[i][3] =  1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2];
-          V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
-          // V[i][4] =  1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2];
-          V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
-          // V[i][5] = 24*Ww[i][2];
-          V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576);
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < 6; i++)
-        {
-          for (int j = 0; j < 6; j++, m++)
-          {
-            vst1q_f32(outptr + m*matrix_stride, V[i][j]);
-          }
-        }
-        outptr += 4;
-      }
-#endif  // __aarch64__
-#ifdef __arm_any__
-      for (; channels_remaining >= 2; channels_remaining -= 2)
-      {
-        // Matrices used and computed in this kernel
-        float32x2_t w[3][3], Ww[6][3], V[6][6];
-
-        // Read weights
-        for (int i = 0; i < 3; i++)
-        {
-          for (int j = 0; j < 3; j++)
-          {
-            w[i][j] = vld1_f32(inptrs[i][j]);
-            inptrs[i][j] += 2;
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 3; j++)
-        {
-          // Ww[0][j] =  6*w[0][j];
-          Ww[0][j] = vmul_n_f32(w[0][j], 6.0);
-
-          // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
-          Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0);
-
-          // Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
-          Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0);
-
-          // Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
-          Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
-          // Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
-          Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
-
-          // Ww[5][j] = 24*w[2][j];
-          Ww[5][j] = vmul_n_f32(w[2][j], 24.0f);
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < 6; i++)
-        {
-          const float recip576 = 1.0f / 576.0f;
-
-          // V[i][0] =  6*Ww[i][0];
-          V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576);
-
-          // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
-          V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576);
-
-          // V[i][2] = -4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2];
-          V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576);
-
-          // V[i][3] =  1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2];
-          V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
-          // V[i][4] =  1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2];
-          V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
-
-          // V[i][5] = 24*Ww[i][2];
-          V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576);
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < 6; i++)
-        {
-          for (int j = 0; j < 6; j++, m++)
-          {
-            vst1_f32(outptr + m*matrix_stride, V[i][j]);
-          }
-        }
-        outptr += 2;
-      }
-#endif  // __arm_any__
-      for (; channels_remaining; channels_remaining--)
-      {
-        // Matrices used and computed in this kernel
-        float w[3][3], Ww[6][3], V[6][6];
-
-        // Read weights
-        for (int i = 0; i < 3; i++)
-        {
-          for (int j = 0; j < 3; j++)
-          {
-            w[i][j] = *(inptrs[i][j]++);
-          }
-        }
-
-        // Compute the matrix W w
-        for (int j = 0; j < 3; j++)
-        {
-          Ww[0][j] =  6*w[0][j];
-          Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
-          Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
-          Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
-          Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
-          Ww[5][j] = 24*w[2][j];
-        }
-
-        // Compute V = W w WT
-        for (int i = 0; i < 6; i++)
-        {
-          V[i][0] = ( 6*Ww[i][0]) / 576.0;
-          V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
-          V[i][2] = (-4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2]) / 576.0;
-          V[i][3] = ( 1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2]) / 576.0;
-          V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2]) / 576.0;
-          V[i][5] = (24*Ww[i][2]) / 576.0;
-        }
-
-        // Store the transformed weights
-        for (int i = 0, m = 0; i < 6; i++)
-        {
-          for (int j = 0; j < 6; j++, m++)
-          {
-            *(outptr + m*matrix_stride) = V[i][j];
-          }
-        }
-        outptr++;
-      }
-    }
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    const int channel_prod = shape.n_input_channels * shape.n_output_channels;
-    return 9 * 16 * channel_prod;
-  }
-
-  template struct WinogradGEMM<4, 4, 3, 3>::WeightsTransform<float>;
-}
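
The deleted F(4x4, 3x3) kernel works with an integer-scaled transform: it computes V = (G_int w G_int^T) / 576, where G_int = 24 * G, G is the usual F(4x4, 3x3) weight-transform matrix, and 576 = 24 * 24. Keeping the factors integral lets the NEON paths stay in small multiply-accumulates and apply the normalisation once at the end. A standalone sanity check of that relationship (sketch only, not part of the patch):

    #include <cassert>
    #include <cmath>

    int main()
    {
      const float G_int[6][3] = {{6,0,0},{-4,-4,-4},{-4,4,-4},{1,2,4},{1,-2,4},{0,0,24}};
      const float G[6][3] = {{1.f/4,0,0},{-1.f/6,-1.f/6,-1.f/6},{-1.f/6,1.f/6,-1.f/6},
                             {1.f/24,1.f/12,1.f/6},{1.f/24,-1.f/12,1.f/6},{0,0,1}};
      for (int i = 0; i < 6; i++)
      {
        for (int j = 0; j < 3; j++)
        {
          assert(std::fabs(G_int[i][j] / 24.0f - G[i][j]) < 1e-6f);
        }
      }
      return 0;
    }
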
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
deleted file mode 100644
index c560aa8..0000000
--- a/src/core/NEON/kernels/convolution/winograd/transforms/weights_6_3_fp32.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
-
-
-namespace winograd
-{
-  template <>
-  template <>
-  void WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Get pointers to each cell of the weight tensor
-    const auto weight_col_stride = n_input_channels * n_output_channels;
-    const float *inptrs[3];
-    for (int j = 0; j < 3; j++)
-    {
-      inptrs[j] = input + j*weight_col_stride;
-    }
-
-    // For each input channel
-    for (int ic = 0; ic < n_input_channels; ic++)
-    {
-      float *outptr = output + ic * matrix_row_stride;
-
-      // For each output channel
-      int channels_remaining = n_output_channels;
-      for (; channels_remaining; channels_remaining--)
-      {
-        // Matrices used and computed in this kernel
-        float w[3], V[inner_tile_cols];
-
-        // Read weights
-        for (int j = 0; j < 3; j++)
-        {
-          w[j] = *(inptrs[j]++);
-        }
-
-        // Compute V = w WT
-        V[0] = (w[0]*-1) / 36.0f;
-        V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
-        V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
-        V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
-        V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
-        V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
-        V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
-        V[7] = (w[2]*1) / 1;
-
-        // Store the transformed weights
-        for (int j = 0; j < inner_tile_cols; j++)
-        {
-          *(outptr + j*matrix_stride) = V[j];
-        }
-        outptr++;
-      }
-    }
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template <>
-  template <>
-  void WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::execute(
-    const int n_output_channels,
-    const int n_input_channels,
-    const float* const input,  // NOTE: Data in HWIO order
-    float* const output,
-    const int matrix_stride,
-    const int matrix_row_stride
-  )
-  {
-    // Redirect to the 1xN implementation
-    WinogradGEMM<1, 6, 1, 3>::template WeightsTransform<float>::execute(
-      n_output_channels, n_input_channels, input, output, matrix_stride,
-      matrix_row_stride
-    );
-  }
-
-  template <>
-  template <>
-  int WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
-  {
-    (void) shape;
-    return 0;  // TODO
-  }
-
-  template struct WinogradGEMM<1, 6, 1, 3>::WeightsTransform<float>;
-  template struct WinogradGEMM<6, 1, 3, 1>::WeightsTransform<float>;
-}
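
The coefficient rows of the deleted 1x3 kernel are scaled evaluations of the kernel polynomial w(t) = w0 + w1*t + w2*t^2 at the interpolation points {0, +-1, +-2, +-3, inf} (the last row, V[7] = w2, is the point at infinity); the matching input-side transform appears as the new 1x8 input kernel later in this patch. A standalone check of that reading (illustrative only; the names are introduced here):

    #include <cassert>
    #include <cmath>

    // Evaluate the kernel as a polynomial w(t) = w0 + w1*t + w2*t^2.
    static float eval(const float w[3], const float t) { return w[0] + w[1]*t + w[2]*t*t; }

    int main()
    {
      const float w[3] = {0.5f, -1.25f, 2.0f};
      float V[8];
      V[0] = (w[0]*-1) / 36.0f;                        // deleted scalar path, verbatim
      V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
      V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
      V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
      V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
      V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
      V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
      V[7] = w[2];
      // Each output is a scaled evaluation of w(t) at one interpolation point.
      assert(std::fabs(V[0] - (-eval(w,  0) /  36)) < 1e-6f);
      assert(std::fabs(V[1] - ( eval(w, -1) /  48)) < 1e-6f);
      assert(std::fabs(V[2] - ( eval(w,  1) /  48)) < 1e-6f);
      assert(std::fabs(V[3] - (-eval(w, -2) / 120)) < 1e-6f);
      assert(std::fabs(V[4] - (-eval(w,  2) / 120)) < 1e-6f);
      assert(std::fabs(V[5] - ( eval(w, -3) / 720)) < 1e-6f);
      assert(std::fabs(V[6] - ( eval(w,  3) / 720)) < 1e-6f);
      return 0;
    }
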
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/winograd.cpp
similarity index 64%
rename from src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
rename to src/core/NEON/kernels/convolution/winograd/winograd.cpp
index a7de2fd..226f303 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,14 +22,13 @@
  * SOFTWARE.
  */
 #include <cstring>
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
+#include "winograd.hpp"
 using namespace winograd;
 
 /** Get the output shape of a convolution. */
-template <int kr, int kc, int itr, int itc>
-template <typename TOut, typename TIn>
-Tensor4DShape WinogradGEMM<kr, kc, itr, itc>::Convolution<TOut, TIn>::get_output_shape(
+template <int kr, int kc, int itr, int itc, WinogradRoots R>
+template <typename TOut, typename TIn, typename TInGEMM, typename TOutGEMM>
+Tensor4DShape WinogradGEMM<kr, kc, itr, itc, R>::Convolution<TOut, TIn, TInGEMM, TOutGEMM>::get_output_shape(
   const KernelShape &kernel_shape,
   const Tensor4DShape &in_shape,
   const PaddingType padding
@@ -47,9 +46,9 @@
 /* Get the memory required to transform the kernel.
  */
 template <int kernel_rows, int kernel_cols,
-          int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_transform_working_size(const KernelShape &shape)
+          int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_transform_working_size(const KernelShape &shape)
 {
   if (shape.ordering == HWIO)
   {
@@ -68,17 +67,17 @@
 /** Get the memory required to store the kernel transformed into the
  * Winograd domain.
  */
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_storage_size(const KernelShape &shape)
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_storage_size(const KernelShape &shape)
 {
   return N_GEMMS * get_kernel_matrix_size(shape);
 }
 
 
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_input_storage_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_input_storage_size(
   const KernelShape &kernel_shape,
   const Tensor4DShape &input_shape,
   const PaddingType padding
@@ -88,9 +87,9 @@
 }
 
 
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_output_storage_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_output_storage_size(
   const KernelShape &kernel_shape,
   const Tensor4DShape &input_shape,
   const PaddingType padding
@@ -102,9 +101,9 @@
 
 /** Get the memory required to apply a Winograd operator to some input.
  */
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_working_space_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_working_space_size(
   const KernelShape &kernel_shape,
   const Tensor4DShape &input_shape,
   const PaddingType padding_type
@@ -139,20 +138,20 @@
 
 /* Get the memory required by a single "input" matrix.
  */
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_input_matrix_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_input_matrix_size(
   const KernelShape &kernel_shape,
   const Tensor4DShape &input_shape,
   const PaddingType padding_type
 )
 {
-  return get_input_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TIn);
+  return get_input_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TGIn);
 }
 
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_input_matrix_stride(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_input_matrix_stride(
   const KernelShape &kernel_shape,
   const Tensor4DShape &input_shape,
   const PaddingType padding_type
@@ -171,21 +170,21 @@
 
 /* Get the memory required by a single "output" matrix.
  */
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_output_matrix_size(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_output_matrix_size(
     const KernelShape &kernel_shape,
     const Tensor4DShape &input_shape,
     const PaddingType padding_type
 )
 {
-  return get_output_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TOut);
+  return get_output_matrix_stride(kernel_shape, input_shape, padding_type) * sizeof(TGOut);
 }
 
 
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_output_matrix_stride(
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_output_matrix_stride(
     const KernelShape &kernel_shape,
     const Tensor4DShape &input_shape,
     const PaddingType padding_type
@@ -204,16 +203,16 @@
 
 /* Get the memory required by a single "kernel" matrix.
  */
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_matrix_size(const KernelShape &shape)
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+size_t WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_matrix_size(const KernelShape &shape)
 {
-  return sizeof(TIn) * get_kernel_matrix_stride(shape);
+  return sizeof(TGIn) * get_kernel_matrix_stride(shape);
 }
 
-template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols>
-template <typename TOut, typename TIn>
-int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols>::Convolution<TOut, TIn>::get_kernel_matrix_stride(const KernelShape &shape)
+template <int kernel_rows, int kernel_cols, int output_tile_rows, int output_tile_cols, WinogradRoots roots>
+template <typename TOut, typename TIn, typename TGIn, typename TGOut>
+int WinogradGEMM<kernel_rows, kernel_cols, output_tile_rows, output_tile_cols, roots>::Convolution<TOut, TIn, TGIn, TGOut>::get_kernel_matrix_stride(const KernelShape &shape)
 {
   const int K = shape.n_input_channels;
   const int N = roundup(shape.n_output_channels, N_BLOCK);
@@ -222,19 +221,16 @@
 
 
 // Instantiate required implementations
-template class WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>;
-template class WinogradGEMM<4, 4, 3, 3>::Convolution<float, float>;
+template class WinogradGEMM<2, 2, 3, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<4, 4, 3, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>;
 
-template class WinogradGEMM<1, 6, 1, 3>::Convolution<float, float>;
-template class WinogradGEMM<6, 1, 3, 1>::Convolution<float, float>;
+template class WinogradGEMM<1, 6, 1, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<6, 1, 3, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>;
 
-template class WinogradGEMM<2, 2, 5, 5>::Convolution<float, float>;
+template class WinogradGEMM<2, 2, 5, 5, WinogradRoots::Integers>::Convolution<float, float, float, float>;
 
-template class WinogradGEMM<1, 4, 1, 5>::Convolution<float, float>;
-template class WinogradGEMM<4, 1, 5, 1>::Convolution<float, float>;
+template class WinogradGEMM<1, 4, 1, 5, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<4, 1, 5, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>;
 
-template class WinogradGEMM<1, 2, 1, 7>::Convolution<float, float>;
-template class WinogradGEMM<2, 1, 7, 1>::Convolution<float, float>;
-
-
-
+template class WinogradGEMM<1, 2, 1, 7, WinogradRoots::Integers>::Convolution<float, float, float, float>;
+template class WinogradGEMM<2, 1, 7, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>;
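
On the instantiation list above: judging from the F(2x2, 5x5) weights kernel deleted earlier (a 5x5 kernel producing a 2x2 output tile), the first template pair reads as the output tile and the second as the kernel, and every instantiation lands on an inner tile of output + kernel - 1 = 4, 6 or 8 points per dimension. That is why the new code below ships only 4x4, 6x6 and 1x8/8x1 input-transform kernels instead of one per convolution configuration. A compile-time sketch of the arithmetic (inner_tile is a helper introduced here, not a library API):

    constexpr int inner_tile(int output_tile, int kernel) { return output_tile + kernel - 1; }
    static_assert(inner_tile(2, 3) == 4, "F(2x2, 3x3) -> 4x4 inner tile");
    static_assert(inner_tile(4, 3) == 6, "F(4x4, 3x3) -> 6x6 inner tile");
    static_assert(inner_tile(2, 5) == 6, "F(2x2, 5x5) -> 6x6 inner tile");
    static_assert(inner_tile(6, 3) == 8, "F(1x6, 1x3) -> 1x8 inner tile");
    static_assert(inner_tile(4, 5) == 8, "F(1x4, 1x5) -> 1x8 inner tile");
    static_assert(inner_tile(2, 7) == 8, "F(1x2, 1x7) -> 1x8 inner tile");
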
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp
new file mode 100644
index 0000000..fcbd21f
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "winograd.hpp"
+#include "padding.hpp"
+
+#define MEMBERFN(RTYPE) template <\
+  int InnerTileRows, int InnerTileCols,\
+  typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE InputTransform<InnerTileRows, InnerTileCols, TIn, TOut, Roots>
+
+
+#define Nx1MEMBERFN(RTYPE) template <\
+  int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE InputTransform<InnerTileRows, 1, TIn, TOut, Roots>
+
+namespace winograd
+{
+
+MEMBERFN()::InputTransform(
+  const int kernel_rows,
+  const int kernel_cols,
+  const int n_batches,
+  const int n_rows,
+  const int n_cols,
+  const int n_channels,
+  const int padding_top,
+  const int padding_left,
+  const int padding_bottom,
+  const int padding_right
+) : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
+    _inptr(nullptr), _outptr(nullptr),
+    _overlap_rows(kernel_rows - 1), _overlap_cols(kernel_cols - 1),
+    _padding_top(padding_top), _padding_left(padding_left), _padding_bottom(padding_bottom), _padding_right(padding_right),
+    _tiles_M(iceildiv(padding_top + n_rows + padding_bottom - kernel_rows + 1, InnerTileRows - kernel_rows + 1)),
+    _tiles_N(iceildiv(padding_left + n_cols + padding_right - kernel_cols + 1, InnerTileCols - kernel_cols + 1)),
+    _matrix_stride(0), _matrix_row_stride(0), _matrix_batch_stride(0),
+    _in_col_stride(0), _in_row_stride(0), _in_batch_stride(0),
+    _working_space_col_stride(n_channels),
+    _working_space_row_stride(InnerTileCols * _working_space_col_stride),
+    _working_space(nullptr)
+{
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr)
+{
+  set_input_tensor(inptr, _n_channels);
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol)
+{
+  set_input_tensor(inptr, _n_cols * ldcol, ldcol);
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol)
+{
+  set_input_tensor(inptr, _n_rows * ldrow, ldrow, ldcol);
+}
+
+MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+  _inptr = static_cast<const TIn *>(inptr);
+  _in_batch_stride = ldbatch;
+  _in_row_stride = ldrow;
+  _in_col_stride = ldcol;
+}
+
+MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow)
+{
+  _outptr = static_cast<TOut *>(mptr);
+  _matrix_stride = ldmatrix;
+  _matrix_row_stride = ldrow;
+  _matrix_batch_stride = _tiles_M * _tiles_N * ldrow;
+}
+
+Nx1MEMBERFN()::InputTransform(
+  const int kernel_rows,
+  const int kernel_cols,
+  const int n_batches,
+  const int n_rows,
+  const int n_cols,
+  const int n_channels,
+  const int padding_top,
+  const int padding_left,
+  const int padding_bottom,
+  const int padding_right
+) : InputTransform<1, InnerTileRows, TIn, TOut, Roots>::InputTransform(
+    /* Transpose rows and columns */
+    kernel_cols, kernel_rows, n_batches, n_cols, n_rows, n_channels,
+    padding_left, padding_top, padding_right, padding_bottom
+  )
+{
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr)
+{
+  set_input_tensor(inptr, this->_n_channels);
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol)
+{
+  set_input_tensor(inptr, this->_n_cols * ldcol, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol)
+{
+  set_input_tensor(inptr, this->_n_rows * ldrow, ldrow, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+  // Transpose row and column strides
+  Base::set_input_tensor(inptr, ldbatch, ldcol, ldrow);
+}
+
+MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const
+{
+  return sizeof(TIn) * InnerTileRows * _working_space_row_stride * nthreads;
+}
+
+MEMBERFN(void)::set_working_space(void * const buffer)
+{
+  _working_space = static_cast<TIn *>(buffer);
+}
+
+MEMBERFN(unsigned int)::get_window(void) const
+{
+  return iceildiv(_n_channels, WINDOW_BLOCK);
+}
+
+MEMBERFN(void)::run(
+  const unsigned int start,
+  const unsigned int stop,
+  const unsigned int threadid
+)
+{
+  // Determine the channels on which to work
+  if (start >= get_window())
+  {
+    return;  // No work to do beyond the end of the window
+  }
+  const unsigned int start_channel = start * WINDOW_BLOCK;
+  const unsigned int stop_channel = std::min<unsigned int>(_n_channels, stop * WINDOW_BLOCK);
+  const unsigned int n_channels = stop_channel - start_channel;
+
+  // Loop over batches
+  for (int batch = 0; batch < _n_batches; batch++)
+  {
+    const TIn* const inptr_batch = _inptr + start_channel + batch*_in_batch_stride;
+    TOut* const outptr_batch = _outptr + start_channel + batch*_matrix_batch_stride;
+
+    // Loop over rows of tiles
+    for (int tile_i = 0; tile_i < _tiles_M; tile_i++)
+    {
+      // Compute the starting and ending row of pixels within the row of tiles,
+      // hence compute the padding to apply to the top and bottom of each tile.
+      const int row_top = tile_i * (InnerTileRows - _overlap_rows) - _padding_top;
+      const int row_bottom = row_top + InnerTileRows;
+      const int row_pad_top = std::max(0, _padding_top - tile_i * (InnerTileRows - _overlap_rows));
+      const int row_pad_bottom = std::max(0, row_bottom - _n_rows);
+
+      // Get a pointer to the start of the row.
+      const int row_offset = std::min(0, row_pad_top - _padding_top);
+      const TIn* const inptr_row = inptr_batch + _in_row_stride*(row_offset + tile_i*(InnerTileRows - _overlap_rows));
+      TOut* const outptr_row = outptr_batch + tile_i*_tiles_N*_matrix_row_stride;
+
+      // Loop over tiles within the row
+      for (int tile_j = 0; tile_j < _tiles_N; tile_j++)
+      {
+        // Compute the starting and ending column of pixels within the tile,
+        // hence compute the padding to apply to the left and right of the
+        // tile.
+        const int tile_left = tile_j * (InnerTileCols - _overlap_cols) - _padding_left;
+        const int tile_right = tile_left + InnerTileCols;
+        const int tile_pad_left = std::max(0, _padding_left - tile_j * (InnerTileCols - _overlap_cols));
+        const int tile_pad_right = std::max(0, tile_right - _n_cols);
+
+        // Get a pointer to the start of the tile.
+        const int col_offset = std::min(0, tile_pad_left - _padding_left);
+        const TIn* const inptr_tile = inptr_row + _in_col_stride*(col_offset + tile_j*(InnerTileCols - _overlap_cols));
+        TOut* const outptr_tile = outptr_row + tile_j * _matrix_row_stride;
+
+        // Transform the tile, applying padding if necessary.
+        if (row_pad_top || tile_pad_left || row_pad_bottom || tile_pad_right)
+        {
+          transform_padded_tile(
+            threadid, n_channels, outptr_tile, inptr_tile,
+            row_pad_top, tile_pad_left, row_pad_bottom, tile_pad_right
+          );
+        }
+        else
+        {
+          transform_unpadded_tile(threadid, n_channels, outptr_tile, inptr_tile);
+        }
+      }
+    }
+  }
+}
+
+MEMBERFN(void)::transform_unpadded_tile(
+  const unsigned int /* threadid unused */,
+  const int n_channels,
+  TOut * const outptr,
+  const TIn * const inptr
+)
+{
+  transform_tile(
+    n_channels, inptr, _in_row_stride, _in_col_stride, outptr, _matrix_stride
+  );
+}
+
+MEMBERFN(void)::transform_padded_tile(
+  const unsigned int threadid,
+  const int n_channels,
+  TOut * const outptr,
+  const TIn * const inptr,
+  const int padding_top,
+  const int padding_left,
+  const int padding_bottom,
+  const int padding_right
+)
+{
+  padding::copy_and_pad_tile(
+    InnerTileRows, InnerTileCols, n_channels,
+    inptr, _in_row_stride, _in_col_stride,
+    static_cast<TIn *>(get_working_space(threadid)), _working_space_row_stride, _working_space_col_stride,
+    padding_top, padding_left, padding_bottom, padding_right
+  );
+
+  transform_tile(
+    n_channels, static_cast<const TIn *>(get_working_space(threadid)),
+    _working_space_row_stride, _working_space_col_stride,
+    outptr, _matrix_stride
+  );
+}
+
+MEMBERFN(void *)::get_working_space(const unsigned int threadid) const
+{
+  return _working_space + InnerTileRows * _working_space_row_stride * threadid;
+}
+
+}  // namespace winograd
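
A worked example of the _tiles_M / _tiles_N arithmetic in the constructor above, with sizes assumed purely for illustration: a 6x6 inner tile and a 3x3 kernel advance by InnerTileRows - kernel_rows + 1 = 4 output rows per tile row, so a 224x224 input padded by 1 on every side yields 1 + 224 + 1 - 3 + 1 = 224 output rows and ceil(224 / 4) = 56 tile rows. The iceildiv below is a local stand-in for the library's helper:

    #include <cassert>

    static int iceildiv(const int a, const int b) { return (a + b - 1) / b; }

    int main()
    {
      const int inner_tile_rows = 6, kernel_rows = 3;
      const int n_rows = 224, padding_top = 1, padding_bottom = 1;
      const int tiles_M = iceildiv(
        padding_top + n_rows + padding_bottom - kernel_rows + 1,  // 224 output rows
        inner_tile_rows - kernel_rows + 1                         // 4 rows per tile
      );
      assert(tiles_M == 56);
      return 0;
    }
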
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..5040ec1
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "input.hpp"
+
+namespace winograd
+{
+
+template <>
+void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile(
+  const int n_channels,
+  const float* const input_base,
+  const int,  // We don't need to stride over rows
+  const int input_col_stride,
+  float* outptr,
+  const int matrix_stride
+)
+{
+  constexpr int inner_tile_cols = 8;
+
+  // Get pointers into the input tile
+  const float *x_ptrs[inner_tile_cols];
+  for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+  {
+    x_ptrs[j] = input_base + xj*input_col_stride;
+  }
+
+  // Vectors used/computed in this kernel.
+  float x[inner_tile_cols];
+  float U[inner_tile_cols];
+
+  for (int j = 0; j < inner_tile_cols; j++)
+  {
+    x[j] = 0.0f;
+  }
+
+  // Perform the Winograd input transformation for each channel in the input
+  // tensor.
+  int channels_remaining = n_channels;
+#ifdef __arm_any__
+  for (; channels_remaining >= 4; channels_remaining -= 4)
+  {
+    float32x4_t x[inner_tile_cols], U[inner_tile_cols];
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[j] = vdupq_n_f32(0.0f);
+    }
+
+    // Load x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[j] = vld1q_f32(x_ptrs[j]);
+      x_ptrs[j] += 4;
+    }
+
+    // Compute U = x . X
+    U[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
+    U[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
+    U[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
+    U[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
+    U[4] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
+    U[5] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
+    U[6] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
+    U[7] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
+
+    // Store the transformed vector
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      vst1q_f32(outptr + j*matrix_stride, U[j]);
+    }
+    outptr += 4;
+  }
+  for (; channels_remaining >= 2; channels_remaining -= 2)
+  {
+    float32x2_t x[inner_tile_cols], U[inner_tile_cols];
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[j] = vdup_n_f32(0.0f);
+    }
+
+    // Load x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[j] = vld1_f32(x_ptrs[j]);
+      x_ptrs[j] += 2;
+    }
+
+    // Compute U = x . X
+    U[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 49), x[4], -14), x[0], -36);
+    U[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[2], 36), x[3], 13), x[4], -13), x[1], -36), x[5], -1);
+    U[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[5], 1), x[2], 36), x[1], 36), x[4], -13), x[3], -13);
+    U[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 20), x[2], 9), x[5], -2), x[4], -10), x[1], -18);
+    U[4] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 18), x[2], 9), x[5], 2), x[4], -10), x[3], -20);
+    U[5] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[3], 15), x[2], 4), x[5], -3), x[4], -5), x[1], -12);
+    U[6] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[6], 1), x[1], 12), x[2], 4), x[5], 3), x[4], -5), x[3], -15);
+    U[7] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(x[7], 1), x[3], 49), x[5], -14), x[1], -36);
+
+    // Store the transformed vector
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      vst1_f32(outptr + j*matrix_stride, U[j]);
+    }
+    outptr += 2;
+  }
+#endif  // __arm_any__
+  for (; channels_remaining; channels_remaining--)
+  {
+    // Load x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[j] = *(x_ptrs[j]++);
+    }
+
+    // Compute U = x . X
+    U[0] = x[0]*-36 + x[4]*-14 + x[2]*49 + x[6]*1;
+    U[1] = x[5]*-1 + x[1]*-36 + x[4]*-13 + x[3]*13 + x[2]*36 + x[6]*1;
+    U[2] = x[3]*-13 + x[4]*-13 + x[1]*36 + x[2]*36 + x[5]*1 + x[6]*1;
+    U[3] = x[1]*-18 + x[4]*-10 + x[5]*-2 + x[2]*9 + x[3]*20 + x[6]*1;
+    U[4] = x[3]*-20 + x[4]*-10 + x[5]*2 + x[2]*9 + x[1]*18 + x[6]*1;
+    U[5] = x[1]*-12 + x[4]*-5 + x[5]*-3 + x[2]*4 + x[3]*15 + x[6]*1;
+    U[6] = x[3]*-15 + x[4]*-5 + x[5]*3 + x[2]*4 + x[1]*12 + x[6]*1;
+    U[7] = x[1]*-36 + x[5]*-14 + x[3]*49 + x[7]*1;
+
+    // Store the transformed vector
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      *(outptr + j*matrix_stride) = U[j];
+    }
+    outptr++;
+  }
+}
+
+template class InputTransform<1, 8, float, float, WinogradRoots::Integers>;
+template class InputTransform<8, 1, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
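
For reference, the unrolled U[...] expressions in the scalar tail above are the rows of the 8-point input transform B^T; row 0 is the polynomial t^6 - 14t^4 + 49t^2 - 36 = (t^2 - 1)(t^2 - 4)(t^2 - 9), i.e. the same interpolation points {0, +-1, +-2, +-3, inf} as the weight transforms this patch deletes. A standalone transcription (sketch only; the function name is introduced here):

    static const float BT[8][8] = {
      { -36,   0,  49,   0, -14,   0, 1, 0 },
      {   0, -36,  36,  13, -13,  -1, 1, 0 },
      {   0,  36,  36, -13, -13,   1, 1, 0 },
      {   0, -18,   9,  20, -10,  -2, 1, 0 },
      {   0,  18,   9, -20, -10,   2, 1, 0 },
      {   0, -12,   4,  15,  -5,  -3, 1, 0 },
      {   0,  12,   4, -15,  -5,   3, 1, 0 },
      {   0, -36,   0,  49,   0, -14, 0, 1 },
    };

    static void input_transform_1x8_reference(const float x[8], float U[8])
    {
      for (int i = 0; i < 8; i++)  // U = BT . x, matching the scalar tail loop
      {
        U[i] = 0.0f;
        for (int j = 0; j < 8; j++) { U[i] += BT[i][j] * x[j]; }
      }
    }
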
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..9393785
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "input.hpp"
+#include "arm.hpp"
+
+namespace winograd
+{
+
+template <>
+void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile(
+  const int n_channels,
+  const float* const input_base,
+  const int input_row_stride,
+  const int input_col_stride,
+  float* outptr,
+  const int matrix_stride
+)
+{
+  constexpr int inner_tile_rows = 4, inner_tile_cols = 4;
+
+  // Get pointers into the input tile
+  const float *x_ptrs[inner_tile_rows][inner_tile_cols];
+  for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
+  {
+    // Get a pointer into the row
+    const float* const row_ptr = input_base + xi*input_row_stride;
+
+    for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+    {
+      x_ptrs[i][j] = row_ptr + xj*input_col_stride;
+    }
+  }
+
+  // Matrices used/computed in this kernel.
+  float x[inner_tile_rows][inner_tile_cols];
+  float XTx[inner_tile_rows][inner_tile_cols];
+  float U[inner_tile_rows][inner_tile_cols];
+
+  for (int i = 0; i < inner_tile_rows; i++)
+  {
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[i][j] = XTx[i][j] = 0.0f;
+    }
+  }
+
+  // Perform the Winograd input transformation for each channel in the input
+  // tensor.
+  int channels_remaining = n_channels;
+#ifdef __aarch64__
+  for (; channels_remaining >= 4; channels_remaining -= 4)
+  {
+    // Matrices used/computed in this kernel.
+    float32x4_t x[inner_tile_rows][inner_tile_cols];
+    float32x4_t XTx[inner_tile_rows][inner_tile_cols];
+    float32x4_t U[inner_tile_rows][inner_tile_cols];
+
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = vdupq_n_f32(0.0f);
+        XTx[i][j] = vdupq_n_f32(0.0f);
+      }
+    }
+
+    // Load x
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = vld1q_f32(x_ptrs[i][j]);
+        x_ptrs[i][j] += 4;
+      }
+    }
+
+    // Compute XT . x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      // XTx[0][j] = x[0][j] - x[2][j];
+      XTx[0][j] = vsubq_f32(x[0][j], x[2][j]);
+
+      // XTx[1][j] = x[1][j] + x[2][j];
+      XTx[1][j] = vaddq_f32(x[1][j], x[2][j]);
+
+      // XTx[2][j] = x[2][j] - x[1][j];
+      XTx[2][j] = vsubq_f32(x[2][j], x[1][j]);
+
+      // XTx[3][j] = x[1][j] - x[3][j];
+      XTx[3][j] = vsubq_f32(x[1][j], x[3][j]);
+    }
+
+    // Compute U = XT . x . X
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      // U[i][0] = XTx[i][0] - XTx[i][2];
+      U[i][0] = vsubq_f32(XTx[i][0], XTx[i][2]);
+
+      // U[i][1] = XTx[i][1] + XTx[i][2];
+      U[i][1] = vaddq_f32(XTx[i][1], XTx[i][2]);
+
+      // U[i][2] = XTx[i][2] - XTx[i][1];
+      U[i][2] = vsubq_f32(XTx[i][2], XTx[i][1]);
+
+      // U[i][3] = XTx[i][1] - XTx[i][3];
+      U[i][3] = vsubq_f32(XTx[i][1], XTx[i][3]);
+    }
+
+    // Store the transformed matrix
+    for (int i = 0, m = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++, m++)
+      {
+        vst1q_f32(outptr + m*matrix_stride, U[i][j]);
+      }
+    }
+    outptr += 4;
+  }
+#endif  // __aarch64__
+#ifdef __arm_any__
+  for (; channels_remaining >= 2; channels_remaining -= 2)
+  {
+    // Matrices used/computed in this kernel.
+    float32x2_t x[inner_tile_rows][inner_tile_cols];
+    float32x2_t XTx[inner_tile_rows][inner_tile_cols];
+    float32x2_t U[inner_tile_rows][inner_tile_cols];
+
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = vdup_n_f32(0.0f);
+        XTx[i][j] = vdup_n_f32(0.0f);
+      }
+    }
+
+    // Load x
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = vld1_f32(x_ptrs[i][j]);
+        x_ptrs[i][j] += 2;
+      }
+    }
+
+    // Compute XT . x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      // XTx[0][j] = x[0][j] - x[2][j];
+      XTx[0][j] = vsub_f32(x[0][j], x[2][j]);
+
+      // XTx[1][j] = x[1][j] + x[2][j];
+      XTx[1][j] = vadd_f32(x[1][j], x[2][j]);
+
+      // XTx[2][j] = x[2][j] - x[1][j];
+      XTx[2][j] = vsub_f32(x[2][j], x[1][j]);
+
+      // XTx[3][j] = x[1][j] - x[3][j];
+      XTx[3][j] = vsub_f32(x[1][j], x[3][j]);
+    }
+
+    // Compute U = XT . x . X
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      // U[i][0] = XTx[i][0] - XTx[i][2];
+      U[i][0] = vsub_f32(XTx[i][0], XTx[i][2]);
+
+      // U[i][1] = XTx[i][1] + XTx[i][2];
+      U[i][1] = vadd_f32(XTx[i][1], XTx[i][2]);
+
+      // U[i][2] = XTx[i][2] - XTx[i][1];
+      U[i][2] = vsub_f32(XTx[i][2], XTx[i][1]);
+
+      // U[i][3] = XTx[i][1] - XTx[i][3];
+      U[i][3] = vsub_f32(XTx[i][1], XTx[i][3]);
+    }
+
+    // Store the transformed matrix
+    for (int i = 0, m = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++, m++)
+      {
+        vst1_f32(outptr + m*matrix_stride, U[i][j]);
+      }
+    }
+    outptr += 2;
+  }
+#endif  // __arm_any__
+  for (; channels_remaining; channels_remaining--)
+  {
+    // Load x
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = *(x_ptrs[i][j]++);
+      }
+    }
+
+    // Compute XT . x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      XTx[0][j] = x[0][j] - x[2][j];
+      XTx[1][j] = x[1][j] + x[2][j];
+      XTx[2][j] = x[2][j] - x[1][j];
+      XTx[3][j] = x[1][j] - x[3][j];
+    }
+
+    // Compute U = XT . x . X
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      U[i][0] = XTx[i][0] - XTx[i][2];
+      U[i][1] = XTx[i][1] + XTx[i][2];
+      U[i][2] = XTx[i][2] - XTx[i][1];
+      U[i][3] = XTx[i][1] - XTx[i][3];
+    }
+
+    // Store the transformed matrix
+    for (int i = 0, m = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++, m++)
+      {
+        *(outptr + m*matrix_stride) = U[i][j];
+      }
+    }
+    outptr++;
+  }
+}
+
+template class InputTransform<4, 4, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
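
The add/subtract network above is U = B^T x B with the standard F(2x2, 3x3) input-transform matrix; written out, the matrix contains only 0 and +-1, which is why the SIMD paths need no multiplies at all. A standalone transcription (reference sketch; the function name is introduced here):

    static const float BT[4][4] = {
      { 1,  0, -1,  0 },
      { 0,  1,  1,  0 },
      { 0, -1,  1,  0 },
      { 0,  1,  0, -1 },
    };

    static void input_transform_4x4_reference(const float x[4][4], float U[4][4])
    {
      float XTx[4][4];
      for (int j = 0; j < 4; j++)     // XTx = B^T . x
      {
        for (int i = 0; i < 4; i++)
        {
          XTx[i][j] = 0.0f;
          for (int k = 0; k < 4; k++) { XTx[i][j] += BT[i][k] * x[k][j]; }
        }
      }
      for (int i = 0; i < 4; i++)     // U = XTx . B
      {
        for (int j = 0; j < 4; j++)
        {
          U[i][j] = 0.0f;
          for (int k = 0; k < 4; k++) { U[i][j] += XTx[i][k] * BT[j][k]; }
        }
      }
    }
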
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..908fc82
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp
@@ -0,0 +1,1308 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "input.hpp"
+
+namespace winograd
+{
+
+#ifdef __aarch64__
+
+template <>
+void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+  int n_channels,
+  const float* input_base,
+  const int input_row_stride,
+  const int input_col_stride,
+  float* matrix_base,
+  const int matrix_stride
+)
+{
+  const float pcoeffs[4] = {1.0f, 2.0f, 4.0f, 5.0f};
+  __asm__ __volatile__(
+    "ldr q0, [%[pcoeffs]]\n"
+    "add x25, %[inptr0], %[input_row_stride]\n"
+    "add x18, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x16, x25, %[input_row_stride]\n"
+    "add x19, x18, %[input_col_stride1]\n"
+    "add x26, x16, %[input_row_stride]\n"
+    "add x20, x19, %[input_col_stride1]\n"
+    "add x17, x26, %[input_row_stride]\n"
+    "add x21, x20, %[input_col_stride1]\n"
+    "add x27, x17, %[input_row_stride]\n"
+    "add x28, %[outptr0], %[output_row_stride]\n"
+    "add x11, %[output_col_stride1], %[output_col_stride1]\n"
+    "add x22, x28, %[output_row_stride]\n"
+    "add x13, x11, %[output_col_stride1]\n"
+    "add x12, x22, %[output_row_stride]\n"
+    "add x23, x13, %[output_col_stride1]\n"
+    "add x14, x12, %[output_row_stride]\n"
+    "add x15, x23, %[output_col_stride1]\n"
+    "add x24, x14, %[output_row_stride]\n"
+    "cmp %w[n_channels], #4\n"
+    "blt 2f\n"
+    "1:\n"
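+    // Label 1: main loop, transforming four channels per iteration in
+    // 128-bit q registers.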
+    "ldr q8, [%[inptr0], x20]\n"
+    "ldr q2, [%[inptr0], x18]\n"
+    "mov v14.16b, v8.16b\n"
+    "ldr q9, [%[inptr0]]\n"
+    "mov v10.16b, v8.16b\n"
+    "ldr q1, [%[inptr0], x21]\n"
+    "fmla v14.4s, v9.4s, v0.s[2]\n"
+    "ldr q4, [%[inptr0], x19]\n"
+    "mov v9.16b, v8.16b\n"
+    "ldr q12, [%[inptr0], %[input_col_stride1]]\n"
+    "fmls v10.4s, v12.4s, v0.s[2]\n"
+    "ldr q5, [x16, x20]\n"
+    "fmls v14.4s, v2.4s, v0.s[3]\n"
+    "ldr q20, [x16, x18]\n"
+    "fmla v9.4s, v12.4s, v0.s[2]\n"
+    "ldr q3, [x16]\n"
+    "fmls v10.4s, v2.4s, v0.s[2]\n"
+    "ldr q6, [x16, x21]\n"
+    "mov v7.16b, v8.16b\n"
+    "ldr q16, [x16, x19]\n"
+    "fmls v9.4s, v2.4s, v0.s[2]\n"
+    "ldr q22, [x16, %[input_col_stride1]]\n"
+    "fadd v10.4s, v10.4s, v4.4s\n"
+    "ldr q17, [x17, x20]\n"
+    "fmls v7.4s, v12.4s, v0.s[1]\n"
+    "ldr q15, [x17, x18]\n"
+    "fsub v9.4s, v9.4s, v4.4s\n"
+    "ldr q19, [x17]\n"
+    "mov v8.16b, v8.16b\n"
+    "ldr q18, [x17, x21]\n"
+    "fsub v7.4s, v7.4s, v2.4s\n"
+    "ldr q13, [x17, x19]\n"
+    "fmla v7.4s, v4.4s, v0.s[1]\n"
+    "ldr q21, [x17, %[input_col_stride1]]\n"
+    "fmla v8.4s, v12.4s, v0.s[1]\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "mov v11.16b, v1.16b\n"
+    "add x16, x16, #16\n"
+    "mov v1.16b, v5.16b\n"
+    "add x17, x17, #16\n"
+    "fsub v8.4s, v8.4s, v2.4s\n"
+    "fmla v11.4s, v12.4s, v0.s[2]\n"
+    "fmls v8.4s, v4.4s, v0.s[1]\n"
+    "fmla v1.4s, v3.4s, v0.s[2]\n"
+    "mov v2.16b, v5.16b\n"
+    "mov v3.16b, v5.16b\n"
+    "fmls v11.4s, v4.4s, v0.s[3]\n"
+    "mov v4.16b, v5.16b\n"
+    "fmls v1.4s, v20.4s, v0.s[3]\n"
+    "fmls v2.4s, v22.4s, v0.s[2]\n"
+    "fmla v3.4s, v22.4s, v0.s[2]\n"
+    "fmls v4.4s, v22.4s, v0.s[1]\n"
+    "mov v5.16b, v5.16b\n"
+    "mov v6.16b, v6.16b\n"
+    "fmls v2.4s, v20.4s, v0.s[2]\n"
+    "mov v12.16b, v17.16b\n"
+    "fmls v3.4s, v20.4s, v0.s[2]\n"
+    "fsub v4.4s, v4.4s, v20.4s\n"
+    "fmla v4.4s, v16.4s, v0.s[1]\n"
+    "fmla v5.4s, v22.4s, v0.s[1]\n"
+    "fadd v2.4s, v2.4s, v16.4s\n"
+    "fmla v6.4s, v22.4s, v0.s[2]\n"
+    "fsub v3.4s, v3.4s, v16.4s\n"
+    "fmla v12.4s, v19.4s, v0.s[2]\n"
+    "fsub v5.4s, v5.4s, v20.4s\n"
+    "mov v19.16b, v17.16b\n"
+    "fmls v5.4s, v16.4s, v0.s[1]\n"
+    "fmls v6.4s, v16.4s, v0.s[3]\n"
+    "fmls v12.4s, v15.4s, v0.s[3]\n"
+    "fmls v19.4s, v21.4s, v0.s[2]\n"
+    "mov v20.16b, v17.16b\n"
+    "mov v16.16b, v17.16b\n"
+    "mov v17.16b, v17.16b\n"
+    "mov v18.16b, v18.16b\n"
+    "fmls v19.4s, v15.4s, v0.s[2]\n"
+    "fmla v20.4s, v21.4s, v0.s[2]\n"
+    "fmls v16.4s, v21.4s, v0.s[1]\n"
+    "fmla v17.4s, v21.4s, v0.s[1]\n"
+    "fmla v18.4s, v21.4s, v0.s[2]\n"
+    "mov v23.16b, v12.16b\n"
+    "fadd v19.4s, v19.4s, v13.4s\n"
+    "fmls v20.4s, v15.4s, v0.s[2]\n"
+    "fsub v16.4s, v16.4s, v15.4s\n"
+    "fsub v17.4s, v17.4s, v15.4s\n"
+    "fmla v16.4s, v13.4s, v0.s[1]\n"
+    "fmls v17.4s, v13.4s, v0.s[1]\n"
+    "fsub v20.4s, v20.4s, v13.4s\n"
+    "fmls v18.4s, v13.4s, v0.s[3]\n"
+    "fmla v23.4s, v14.4s, v0.s[2]\n"
+    "mov v15.16b, v19.16b\n"
+    "mov v14.16b, v20.16b\n"
+    "mov v24.16b, v16.16b\n"
+    "fmla v15.4s, v10.4s, v0.s[2]\n"
+    "mov v10.16b, v17.16b\n"
+    "fmls v23.4s, v1.4s, v0.s[3]\n"
+    "fmla v14.4s, v9.4s, v0.s[2]\n"
+    "fmla v24.4s, v7.4s, v0.s[2]\n"
+    "fmla v10.4s, v8.4s, v0.s[2]\n"
+    "fmls v15.4s, v2.4s, v0.s[3]\n"
+    "mov v7.16b, v18.16b\n"
+    "str q23, [%[outptr0]]\n"
+    "fmls v14.4s, v3.4s, v0.s[3]\n"
+    "fmls v24.4s, v4.4s, v0.s[3]\n"
+    "fmls v10.4s, v5.4s, v0.s[3]\n"
+    "str q15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v7.4s, v11.4s, v0.s[2]\n"
+    "str q14, [%[outptr0], x11]\n"
+    "str q24, [%[outptr0], x13]\n"
+    "str q10, [%[outptr0], x23]\n"
+    "fmls v7.4s, v6.4s, v0.s[3]\n"
+    "str q7, [%[outptr0], x15]\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "mov v26.16b, v12.16b\n"
+    "mov v25.16b, v19.16b\n"
+    "ldr q11, [x25, x20]\n"
+    "mov v10.16b, v11.16b\n"
+    "ldr q23, [x25, x18]\n"
+    "mov v9.16b, v11.16b\n"
+    "ldr q7, [x25]\n"
+    "fmla v10.4s, v7.4s, v0.s[2]\n"
+    "ldr q13, [x25, x21]\n"
+    "mov v7.16b, v11.16b\n"
+    "ldr q31, [x25, x19]\n"
+    "mov v8.16b, v11.16b\n"
+    "ldr q21, [x25, %[input_col_stride1]]\n"
+    "fmls v10.4s, v23.4s, v0.s[3]\n"
+    "ldr q30, [x26, x20]\n"
+    "fmls v9.4s, v21.4s, v0.s[2]\n"
+    "ldr q29, [x26, x18]\n"
+    "fmla v7.4s, v21.4s, v0.s[2]\n"
+    "ldr q22, [x26]\n"
+    "fmls v8.4s, v21.4s, v0.s[1]\n"
+    "ldr q24, [x26, x21]\n"
+    "fmls v9.4s, v23.4s, v0.s[2]\n"
+    "ldr q27, [x26, x19]\n"
+    "fmls v7.4s, v23.4s, v0.s[2]\n"
+    "ldr q28, [x26, %[input_col_stride1]]\n"
+    "fsub v8.4s, v8.4s, v23.4s\n"
+    "add x25, x25, #16\n"
+    "fadd v9.4s, v9.4s, v31.4s\n"
+    "add x26, x26, #16\n"
+    "fsub v7.4s, v7.4s, v31.4s\n"
+    "fmla v8.4s, v31.4s, v0.s[1]\n"
+    "mov v11.16b, v11.16b\n"
+    "mov v15.16b, v13.16b\n"
+    "mov v14.16b, v30.16b\n"
+    "mov v13.16b, v30.16b\n"
+    "fmla v11.4s, v21.4s, v0.s[1]\n"
+    "fmla v15.4s, v21.4s, v0.s[2]\n"
+    "fmla v14.4s, v22.4s, v0.s[2]\n"
+    "fmls v13.4s, v28.4s, v0.s[2]\n"
+    "mov v21.16b, v30.16b\n"
+    "mov v22.16b, v30.16b\n"
+    "fsub v11.4s, v11.4s, v23.4s\n"
+    "fmls v15.4s, v31.4s, v0.s[3]\n"
+    "fmls v11.4s, v31.4s, v0.s[1]\n"
+    "fmls v14.4s, v29.4s, v0.s[3]\n"
+    "fmls v13.4s, v29.4s, v0.s[2]\n"
+    "fmla v21.4s, v28.4s, v0.s[2]\n"
+    "fmls v22.4s, v28.4s, v0.s[1]\n"
+    "mov v23.16b, v30.16b\n"
+    "mov v24.16b, v24.16b\n"
+    "fmls v26.4s, v10.4s, v0.s[2]\n"
+    "fadd v13.4s, v13.4s, v27.4s\n"
+    "fmls v21.4s, v29.4s, v0.s[2]\n"
+    "fsub v22.4s, v22.4s, v29.4s\n"
+    "fmla v23.4s, v28.4s, v0.s[1]\n"
+    "fmla v22.4s, v27.4s, v0.s[1]\n"
+    "fmla v24.4s, v28.4s, v0.s[2]\n"
+    "fsub v21.4s, v21.4s, v27.4s\n"
+    "fmls v26.4s, v1.4s, v0.s[2]\n"
+    "fsub v23.4s, v23.4s, v29.4s\n"
+    "fmls v25.4s, v9.4s, v0.s[2]\n"
+    "fmls v23.4s, v27.4s, v0.s[1]\n"
+    "fmls v24.4s, v27.4s, v0.s[3]\n"
+    "fadd v26.4s, v26.4s, v14.4s\n"
+    "mov v27.16b, v20.16b\n"
+    "str q26, [x28]\n"
+    "fmls v25.4s, v2.4s, v0.s[2]\n"
+    "fmls v27.4s, v7.4s, v0.s[2]\n"
+    "mov v31.16b, v16.16b\n"
+    "mov v30.16b, v17.16b\n"
+    "mov v29.16b, v18.16b\n"
+    "fadd v25.4s, v25.4s, v13.4s\n"
+    "fmls v31.4s, v8.4s, v0.s[2]\n"
+    "str q25, [x28, %[output_col_stride1]]\n"
+    "fmls v27.4s, v3.4s, v0.s[2]\n"
+    "fmls v30.4s, v11.4s, v0.s[2]\n"
+    "fmls v29.4s, v15.4s, v0.s[2]\n"
+    "fmls v31.4s, v4.4s, v0.s[2]\n"
+    "mov v26.16b, v12.16b\n"
+    "fadd v27.4s, v27.4s, v21.4s\n"
+    "mov v25.16b, v19.16b\n"
+    "str q27, [x28, x11]\n"
+    "fmls v30.4s, v5.4s, v0.s[2]\n"
+    "fadd v31.4s, v31.4s, v22.4s\n"
+    "fmls v29.4s, v6.4s, v0.s[2]\n"
+    "str q31, [x28, x13]\n"
+    "fmla v26.4s, v10.4s, v0.s[2]\n"
+    "fadd v30.4s, v30.4s, v23.4s\n"
+    "fmla v25.4s, v9.4s, v0.s[2]\n"
+    "str q30, [x28, x23]\n"
+    "fadd v29.4s, v29.4s, v24.4s\n"
+    "str q29, [x28, x15]\n"
+    "fmls v26.4s, v1.4s, v0.s[2]\n"
+    "fmls v25.4s, v2.4s, v0.s[2]\n"
+    "add x28, x28, #16\n"
+    "mov v30.16b, v20.16b\n"
+    "mov v29.16b, v16.16b\n"
+    "fsub v26.4s, v26.4s, v14.4s\n"
+    "mov v28.16b, v17.16b\n"
+    "str q26, [x22]\n"
+    "fsub v25.4s, v25.4s, v13.4s\n"
+    "str q25, [x22, %[output_col_stride1]]\n"
+    "fmla v30.4s, v7.4s, v0.s[2]\n"
+    "fmla v29.4s, v8.4s, v0.s[2]\n"
+    "fmla v28.4s, v11.4s, v0.s[2]\n"
+    "mov v26.16b, v18.16b\n"
+    "mov v25.16b, v12.16b\n"
+    "fmls v30.4s, v3.4s, v0.s[2]\n"
+    "mov v31.16b, v19.16b\n"
+    "fmls v29.4s, v4.4s, v0.s[2]\n"
+    "fmls v28.4s, v5.4s, v0.s[2]\n"
+    "fmla v26.4s, v15.4s, v0.s[2]\n"
+    "fmls v25.4s, v10.4s, v0.s[1]\n"
+    "fsub v30.4s, v30.4s, v21.4s\n"
+    "fmls v31.4s, v9.4s, v0.s[1]\n"
+    "str q30, [x22, x11]\n"
+    "fsub v29.4s, v29.4s, v22.4s\n"
+    "str q29, [x22, x13]\n"
+    "fsub v28.4s, v28.4s, v23.4s\n"
+    "str q28, [x22, x23]\n"
+    "fmls v26.4s, v6.4s, v0.s[2]\n"
+    "fsub v25.4s, v25.4s, v1.4s\n"
+    "fsub v31.4s, v31.4s, v2.4s\n"
+    "fmla v25.4s, v14.4s, v0.s[1]\n"
+    "fmla v31.4s, v13.4s, v0.s[1]\n"
+    "fsub v26.4s, v26.4s, v24.4s\n"
+    "mov v27.16b, v20.16b\n"
+    "str q26, [x22, x15]\n"
+    "mov v26.16b, v16.16b\n"
+    "str q25, [x12]\n"
+    "fmls v27.4s, v7.4s, v0.s[1]\n"
+    "str q31, [x12, %[output_col_stride1]]\n"
+    "fmls v26.4s, v8.4s, v0.s[1]\n"
+    "mov v25.16b, v17.16b\n"
+    "add x22, x22, #16\n"
+    "fsub v27.4s, v27.4s, v3.4s\n"
+    "mov v28.16b, v18.16b\n"
+    "fmla v27.4s, v21.4s, v0.s[1]\n"
+    "fsub v26.4s, v26.4s, v4.4s\n"
+    "fmla v26.4s, v22.4s, v0.s[1]\n"
+    "fmls v25.4s, v11.4s, v0.s[1]\n"
+    "fmls v28.4s, v15.4s, v0.s[1]\n"
+    "mov v12.16b, v12.16b\n"
+    "str q27, [x12, x11]\n"
+    "mov v19.16b, v19.16b\n"
+    "str q26, [x12, x13]\n"
+    "fsub v25.4s, v25.4s, v5.4s\n"
+    "fmla v25.4s, v23.4s, v0.s[1]\n"
+    "fsub v28.4s, v28.4s, v6.4s\n"
+    "fmla v28.4s, v24.4s, v0.s[1]\n"
+    "fmla v12.4s, v10.4s, v0.s[1]\n"
+    "fmla v19.4s, v9.4s, v0.s[1]\n"
+    "mov v20.16b, v20.16b\n"
+    "str q25, [x12, x23]\n"
+    "mov v16.16b, v16.16b\n"
+    "str q28, [x12, x15]\n"
+    "fsub v12.4s, v12.4s, v1.4s\n"
+    "fmls v12.4s, v14.4s, v0.s[1]\n"
+    "add x12, x12, #16\n"
+    "fsub v19.4s, v19.4s, v2.4s\n"
+    "fmla v20.4s, v7.4s, v0.s[1]\n"
+    "fmls v19.4s, v13.4s, v0.s[1]\n"
+    "fmla v16.4s, v8.4s, v0.s[1]\n"
+    "str q12, [x14]\n"
+    "mov v1.16b, v17.16b\n"
+    "fsub v20.4s, v20.4s, v3.4s\n"
+    "mov v17.16b, v18.16b\n"
+    "str q19, [x14, %[output_col_stride1]]\n"
+    "fmls v20.4s, v21.4s, v0.s[1]\n"
+    "fsub v16.4s, v16.4s, v4.4s\n"
+    "fmla v1.4s, v11.4s, v0.s[1]\n"
+    "fmls v16.4s, v22.4s, v0.s[1]\n"
+    "fmla v17.4s, v15.4s, v0.s[1]\n"
+    "str q20, [x14, x11]\n"
+    "fsub v1.4s, v1.4s, v5.4s\n"
+    "str q16, [x14, x13]\n"
+    "fmls v1.4s, v23.4s, v0.s[1]\n"
+    "fsub v17.4s, v17.4s, v6.4s\n"
+    "fmls v17.4s, v24.4s, v0.s[1]\n"
+    "str q1, [x14, x23]\n"
+    "str q17, [x14, x15]\n"
+    "add x14, x14, #16\n"
+    "ldr q2, [x27, x20]\n"
+    "mov v4.16b, v2.16b\n"
+    "ldr q17, [x27, x18]\n"
+    "mov v12.16b, v2.16b\n"
+    "ldr q18, [x27]\n"
+    "fmla v4.4s, v18.4s, v0.s[2]\n"
+    "ldr q3, [x27, x21]\n"
+    "mov v6.16b, v2.16b\n"
+    "ldr q5, [x27, x19]\n"
+    "mov v1.16b, v2.16b\n"
+    "ldr q18, [x27, %[input_col_stride1]]\n"
+    "fmls v4.4s, v17.4s, v0.s[3]\n"
+    "add x27, x27, #16\n"
+    "fmls v12.4s, v18.4s, v0.s[2]\n"
+    "sub %w[n_channels], %w[n_channels], #4\n"
+    "fmla v6.4s, v18.4s, v0.s[2]\n"
+    "cmp %w[n_channels], #4\n"
+    "fmls v1.4s, v18.4s, v0.s[1]\n"
+    "mov v2.16b, v2.16b\n"
+    "fmls v12.4s, v17.4s, v0.s[2]\n"
+    "mov v3.16b, v3.16b\n"
+    "fmls v6.4s, v17.4s, v0.s[2]\n"
+    "fmla v2.4s, v18.4s, v0.s[1]\n"
+    "fsub v1.4s, v1.4s, v17.4s\n"
+    "fmla v3.4s, v18.4s, v0.s[2]\n"
+    "fadd v12.4s, v12.4s, v5.4s\n"
+    "fmla v1.4s, v5.4s, v0.s[1]\n"
+    "fsub v6.4s, v6.4s, v5.4s\n"
+    "fsub v2.4s, v2.4s, v17.4s\n"
+    "fmls v2.4s, v5.4s, v0.s[1]\n"
+    "fmls v3.4s, v5.4s, v0.s[3]\n"
+    "mov v4.16b, v4.16b\n"
+    "mov v16.16b, v12.16b\n"
+    "mov v5.16b, v6.16b\n"
+    "mov v6.16b, v1.16b\n"
+    "fmla v4.4s, v10.4s, v0.s[2]\n"
+    "fmla v16.4s, v9.4s, v0.s[2]\n"
+    "fmla v5.4s, v7.4s, v0.s[2]\n"
+    "fmla v6.4s, v8.4s, v0.s[2]\n"
+    "mov v9.16b, v2.16b\n"
+    "mov v10.16b, v3.16b\n"
+    "fmls v4.4s, v14.4s, v0.s[3]\n"
+    "fmls v16.4s, v13.4s, v0.s[3]\n"
+    "fmls v5.4s, v21.4s, v0.s[3]\n"
+    "fmls v6.4s, v22.4s, v0.s[3]\n"
+    "fmla v9.4s, v11.4s, v0.s[2]\n"
+    "fmla v10.4s, v15.4s, v0.s[2]\n"
+    "str q4, [x24]\n"
+    "str q16, [x24, %[output_col_stride1]]\n"
+    "str q5, [x24, x11]\n"
+    "str q6, [x24, x13]\n"
+    "fmls v9.4s, v23.4s, v0.s[3]\n"
+    "fmls v10.4s, v24.4s, v0.s[3]\n"
+    "str q9, [x24, x23]\n"
+    "str q10, [x24, x15]\n"
+    "add x24, x24, #16\n"
+    "bge 1b\n"
+    "2:\n"
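+    // Label 2: if at least two channels remain, process a pair using
+    // 64-bit d registers.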
+    "cmp %w[n_channels], #2\n"
+    "blt 3f\n"
+    "ldr d8, [%[inptr0], x20]\n"
+    "mov v14.16b, v8.16b\n"
+    "ldr d2, [%[inptr0], x18]\n"
+    "mov v10.16b, v8.16b\n"
+    "ldr d9, [%[inptr0]]\n"
+    "fmla v14.4s, v9.4s, v0.s[2]\n"
+    "ldr d1, [%[inptr0], x21]\n"
+    "mov v9.16b, v8.16b\n"
+    "ldr d4, [%[inptr0], x19]\n"
+    "mov v7.16b, v8.16b\n"
+    "ldr d12, [%[inptr0], %[input_col_stride1]]\n"
+    "fmls v14.4s, v2.4s, v0.s[3]\n"
+    "ldr d5, [x16, x20]\n"
+    "fmls v10.4s, v12.4s, v0.s[2]\n"
+    "ldr d20, [x16, x18]\n"
+    "fmla v9.4s, v12.4s, v0.s[2]\n"
+    "ldr d3, [x16]\n"
+    "fmls v7.4s, v12.4s, v0.s[1]\n"
+    "ldr d6, [x16, x21]\n"
+    "fmls v10.4s, v2.4s, v0.s[2]\n"
+    "ldr d16, [x16, x19]\n"
+    "fmls v9.4s, v2.4s, v0.s[2]\n"
+    "ldr d22, [x16, %[input_col_stride1]]\n"
+    "fsub v7.4s, v7.4s, v2.4s\n"
+    "ldr d17, [x17, x20]\n"
+    "fadd v10.4s, v10.4s, v4.4s\n"
+    "ldr d15, [x17, x18]\n"
+    "fsub v9.4s, v9.4s, v4.4s\n"
+    "ldr d19, [x17]\n"
+    "fmla v7.4s, v4.4s, v0.s[1]\n"
+    "ldr d18, [x17, x21]\n"
+    "mov v8.16b, v8.16b\n"
+    "ldr d13, [x17, x19]\n"
+    "mov v11.16b, v1.16b\n"
+    "ldr d21, [x17, %[input_col_stride1]]\n"
+    "fmla v8.4s, v12.4s, v0.s[1]\n"
+    "add %[inptr0], %[inptr0], #8\n"
+    "fmla v11.4s, v12.4s, v0.s[2]\n"
+    "add x16, x16, #8\n"
+    "mov v1.16b, v5.16b\n"
+    "add x17, x17, #8\n"
+    "fsub v8.4s, v8.4s, v2.4s\n"
+    "mov v2.16b, v5.16b\n"
+    "fmls v8.4s, v4.4s, v0.s[1]\n"
+    "fmls v11.4s, v4.4s, v0.s[3]\n"
+    "fmla v1.4s, v3.4s, v0.s[2]\n"
+    "fmls v2.4s, v22.4s, v0.s[2]\n"
+    "mov v3.16b, v5.16b\n"
+    "mov v4.16b, v5.16b\n"
+    "mov v5.16b, v5.16b\n"
+    "mov v6.16b, v6.16b\n"
+    "fmls v1.4s, v20.4s, v0.s[3]\n"
+    "fmls v2.4s, v20.4s, v0.s[2]\n"
+    "fmla v3.4s, v22.4s, v0.s[2]\n"
+    "fmls v4.4s, v22.4s, v0.s[1]\n"
+    "fmla v5.4s, v22.4s, v0.s[1]\n"
+    "fmla v6.4s, v22.4s, v0.s[2]\n"
+    "fadd v2.4s, v2.4s, v16.4s\n"
+    "mov v12.16b, v17.16b\n"
+    "fmls v3.4s, v20.4s, v0.s[2]\n"
+    "fsub v4.4s, v4.4s, v20.4s\n"
+    "fmla v4.4s, v16.4s, v0.s[1]\n"
+    "fsub v5.4s, v5.4s, v20.4s\n"
+    "fmls v5.4s, v16.4s, v0.s[1]\n"
+    "fmls v6.4s, v16.4s, v0.s[3]\n"
+    "fsub v3.4s, v3.4s, v16.4s\n"
+    "fmla v12.4s, v19.4s, v0.s[2]\n"
+    "mov v19.16b, v17.16b\n"
+    "mov v20.16b, v17.16b\n"
+    "mov v16.16b, v17.16b\n"
+    "mov v17.16b, v17.16b\n"
+    "fmls v12.4s, v15.4s, v0.s[3]\n"
+    "fmls v19.4s, v21.4s, v0.s[2]\n"
+    "fmla v20.4s, v21.4s, v0.s[2]\n"
+    "fmls v16.4s, v21.4s, v0.s[1]\n"
+    "fmla v17.4s, v21.4s, v0.s[1]\n"
+    "mov v18.16b, v18.16b\n"
+    "fmls v19.4s, v15.4s, v0.s[2]\n"
+    "mov v23.16b, v12.16b\n"
+    "fmls v20.4s, v15.4s, v0.s[2]\n"
+    "fsub v16.4s, v16.4s, v15.4s\n"
+    "fmla v16.4s, v13.4s, v0.s[1]\n"
+    "fsub v17.4s, v17.4s, v15.4s\n"
+    "fadd v19.4s, v19.4s, v13.4s\n"
+    "fmls v17.4s, v13.4s, v0.s[1]\n"
+    "fsub v20.4s, v20.4s, v13.4s\n"
+    "fmla v18.4s, v21.4s, v0.s[2]\n"
+    "fmla v23.4s, v14.4s, v0.s[2]\n"
+    "mov v15.16b, v19.16b\n"
+    "mov v14.16b, v20.16b\n"
+    "mov v24.16b, v16.16b\n"
+    "fmls v18.4s, v13.4s, v0.s[3]\n"
+    "fmla v15.4s, v10.4s, v0.s[2]\n"
+    "fmls v23.4s, v1.4s, v0.s[3]\n"
+    "fmla v14.4s, v9.4s, v0.s[2]\n"
+    "fmla v24.4s, v7.4s, v0.s[2]\n"
+    "mov v10.16b, v17.16b\n"
+    "fmls v15.4s, v2.4s, v0.s[3]\n"
+    "mov v7.16b, v18.16b\n"
+    "str d23, [%[outptr0]]\n"
+    "fmls v14.4s, v3.4s, v0.s[3]\n"
+    "fmls v24.4s, v4.4s, v0.s[3]\n"
+    "fmla v10.4s, v8.4s, v0.s[2]\n"
+    "str d15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v7.4s, v11.4s, v0.s[2]\n"
+    "str d14, [%[outptr0], x11]\n"
+    "fmls v10.4s, v5.4s, v0.s[3]\n"
+    "str d24, [%[outptr0], x13]\n"
+    "fmls v7.4s, v6.4s, v0.s[3]\n"
+    "str d10, [%[outptr0], x23]\n"
+    "str d7, [%[outptr0], x15]\n"
+    "add %[outptr0], %[outptr0], #8\n"
+    "mov v26.16b, v12.16b\n"
+    "mov v25.16b, v19.16b\n"
+    "ldr d11, [x25, x20]\n"
+    "mov v10.16b, v11.16b\n"
+    "ldr d23, [x25, x18]\n"
+    "mov v9.16b, v11.16b\n"
+    "ldr d7, [x25]\n"
+    "fmla v10.4s, v7.4s, v0.s[2]\n"
+    "ldr d13, [x25, x21]\n"
+    "mov v7.16b, v11.16b\n"
+    "ldr d31, [x25, x19]\n"
+    "mov v8.16b, v11.16b\n"
+    "ldr d21, [x25, %[input_col_stride1]]\n"
+    "fmls v10.4s, v23.4s, v0.s[3]\n"
+    "ldr d30, [x26, x20]\n"
+    "fmls v9.4s, v21.4s, v0.s[2]\n"
+    "ldr d29, [x26, x18]\n"
+    "fmla v7.4s, v21.4s, v0.s[2]\n"
+    "ldr d22, [x26]\n"
+    "fmls v8.4s, v21.4s, v0.s[1]\n"
+    "ldr d24, [x26, x21]\n"
+    "fmls v9.4s, v23.4s, v0.s[2]\n"
+    "ldr d27, [x26, x19]\n"
+    "fmls v7.4s, v23.4s, v0.s[2]\n"
+    "ldr d28, [x26, %[input_col_stride1]]\n"
+    "fsub v8.4s, v8.4s, v23.4s\n"
+    "add x25, x25, #8\n"
+    "fadd v9.4s, v9.4s, v31.4s\n"
+    "add x26, x26, #8\n"
+    "fsub v7.4s, v7.4s, v31.4s\n"
+    "fmla v8.4s, v31.4s, v0.s[1]\n"
+    "mov v11.16b, v11.16b\n"
+    "mov v15.16b, v13.16b\n"
+    "mov v14.16b, v30.16b\n"
+    "mov v13.16b, v30.16b\n"
+    "fmla v11.4s, v21.4s, v0.s[1]\n"
+    "fmla v15.4s, v21.4s, v0.s[2]\n"
+    "fmla v14.4s, v22.4s, v0.s[2]\n"
+    "fmls v13.4s, v28.4s, v0.s[2]\n"
+    "mov v21.16b, v30.16b\n"
+    "mov v22.16b, v30.16b\n"
+    "fsub v11.4s, v11.4s, v23.4s\n"
+    "fmls v15.4s, v31.4s, v0.s[3]\n"
+    "fmls v11.4s, v31.4s, v0.s[1]\n"
+    "fmls v14.4s, v29.4s, v0.s[3]\n"
+    "fmls v13.4s, v29.4s, v0.s[2]\n"
+    "fmla v21.4s, v28.4s, v0.s[2]\n"
+    "fmls v22.4s, v28.4s, v0.s[1]\n"
+    "mov v23.16b, v30.16b\n"
+    "mov v24.16b, v24.16b\n"
+    "fmls v26.4s, v10.4s, v0.s[2]\n"
+    "fadd v13.4s, v13.4s, v27.4s\n"
+    "fmls v21.4s, v29.4s, v0.s[2]\n"
+    "fsub v22.4s, v22.4s, v29.4s\n"
+    "fmla v23.4s, v28.4s, v0.s[1]\n"
+    "fmla v22.4s, v27.4s, v0.s[1]\n"
+    "fmla v24.4s, v28.4s, v0.s[2]\n"
+    "fsub v21.4s, v21.4s, v27.4s\n"
+    "fmls v26.4s, v1.4s, v0.s[2]\n"
+    "fsub v23.4s, v23.4s, v29.4s\n"
+    "fmls v25.4s, v9.4s, v0.s[2]\n"
+    "fmls v23.4s, v27.4s, v0.s[1]\n"
+    "fmls v24.4s, v27.4s, v0.s[3]\n"
+    "fadd v26.4s, v26.4s, v14.4s\n"
+    "mov v27.16b, v20.16b\n"
+    "str d26, [x28]\n"
+    "fmls v25.4s, v2.4s, v0.s[2]\n"
+    "fmls v27.4s, v7.4s, v0.s[2]\n"
+    "mov v31.16b, v16.16b\n"
+    "mov v30.16b, v17.16b\n"
+    "mov v29.16b, v18.16b\n"
+    "fadd v25.4s, v25.4s, v13.4s\n"
+    "fmls v31.4s, v8.4s, v0.s[2]\n"
+    "str d25, [x28, %[output_col_stride1]]\n"
+    "fmls v27.4s, v3.4s, v0.s[2]\n"
+    "fmls v30.4s, v11.4s, v0.s[2]\n"
+    "fmls v29.4s, v15.4s, v0.s[2]\n"
+    "fmls v31.4s, v4.4s, v0.s[2]\n"
+    "mov v26.16b, v12.16b\n"
+    "fadd v27.4s, v27.4s, v21.4s\n"
+    "mov v25.16b, v19.16b\n"
+    "str d27, [x28, x11]\n"
+    "fmls v30.4s, v5.4s, v0.s[2]\n"
+    "fadd v31.4s, v31.4s, v22.4s\n"
+    "fmls v29.4s, v6.4s, v0.s[2]\n"
+    "str d31, [x28, x13]\n"
+    "fmla v26.4s, v10.4s, v0.s[2]\n"
+    "fadd v30.4s, v30.4s, v23.4s\n"
+    "fmla v25.4s, v9.4s, v0.s[2]\n"
+    "str d30, [x28, x23]\n"
+    "fadd v29.4s, v29.4s, v24.4s\n"
+    "str d29, [x28, x15]\n"
+    "fmls v26.4s, v1.4s, v0.s[2]\n"
+    "fmls v25.4s, v2.4s, v0.s[2]\n"
+    "add x28, x28, #8\n"
+    "mov v30.16b, v20.16b\n"
+    "mov v29.16b, v16.16b\n"
+    "fsub v26.4s, v26.4s, v14.4s\n"
+    "mov v28.16b, v17.16b\n"
+    "str d26, [x22]\n"
+    "fsub v25.4s, v25.4s, v13.4s\n"
+    "str d25, [x22, %[output_col_stride1]]\n"
+    "fmla v30.4s, v7.4s, v0.s[2]\n"
+    "fmla v29.4s, v8.4s, v0.s[2]\n"
+    "fmla v28.4s, v11.4s, v0.s[2]\n"
+    "mov v26.16b, v18.16b\n"
+    "mov v25.16b, v12.16b\n"
+    "fmls v30.4s, v3.4s, v0.s[2]\n"
+    "mov v31.16b, v19.16b\n"
+    "fmls v29.4s, v4.4s, v0.s[2]\n"
+    "fmls v28.4s, v5.4s, v0.s[2]\n"
+    "fmla v26.4s, v15.4s, v0.s[2]\n"
+    "fmls v25.4s, v10.4s, v0.s[1]\n"
+    "fsub v30.4s, v30.4s, v21.4s\n"
+    "fmls v31.4s, v9.4s, v0.s[1]\n"
+    "str d30, [x22, x11]\n"
+    "fsub v29.4s, v29.4s, v22.4s\n"
+    "str d29, [x22, x13]\n"
+    "fsub v28.4s, v28.4s, v23.4s\n"
+    "str d28, [x22, x23]\n"
+    "fmls v26.4s, v6.4s, v0.s[2]\n"
+    "fsub v25.4s, v25.4s, v1.4s\n"
+    "fsub v31.4s, v31.4s, v2.4s\n"
+    "fmla v25.4s, v14.4s, v0.s[1]\n"
+    "fmla v31.4s, v13.4s, v0.s[1]\n"
+    "fsub v26.4s, v26.4s, v24.4s\n"
+    "mov v27.16b, v20.16b\n"
+    "str d26, [x22, x15]\n"
+    "mov v26.16b, v16.16b\n"
+    "str d25, [x12]\n"
+    "fmls v27.4s, v7.4s, v0.s[1]\n"
+    "str d31, [x12, %[output_col_stride1]]\n"
+    "fmls v26.4s, v8.4s, v0.s[1]\n"
+    "mov v25.16b, v17.16b\n"
+    "add x22, x22, #8\n"
+    "fsub v27.4s, v27.4s, v3.4s\n"
+    "mov v28.16b, v18.16b\n"
+    "fmla v27.4s, v21.4s, v0.s[1]\n"
+    "fsub v26.4s, v26.4s, v4.4s\n"
+    "fmla v26.4s, v22.4s, v0.s[1]\n"
+    "fmls v25.4s, v11.4s, v0.s[1]\n"
+    "fmls v28.4s, v15.4s, v0.s[1]\n"
+    "mov v12.16b, v12.16b\n"
+    "str d27, [x12, x11]\n"
+    "mov v19.16b, v19.16b\n"
+    "str d26, [x12, x13]\n"
+    "fsub v25.4s, v25.4s, v5.4s\n"
+    "fmla v25.4s, v23.4s, v0.s[1]\n"
+    "fsub v28.4s, v28.4s, v6.4s\n"
+    "fmla v28.4s, v24.4s, v0.s[1]\n"
+    "fmla v12.4s, v10.4s, v0.s[1]\n"
+    "fmla v19.4s, v9.4s, v0.s[1]\n"
+    "mov v20.16b, v20.16b\n"
+    "str d25, [x12, x23]\n"
+    "mov v16.16b, v16.16b\n"
+    "str d28, [x12, x15]\n"
+    "fsub v12.4s, v12.4s, v1.4s\n"
+    "fmls v12.4s, v14.4s, v0.s[1]\n"
+    "add x12, x12, #8\n"
+    "fsub v19.4s, v19.4s, v2.4s\n"
+    "fmla v20.4s, v7.4s, v0.s[1]\n"
+    "fmls v19.4s, v13.4s, v0.s[1]\n"
+    "fmla v16.4s, v8.4s, v0.s[1]\n"
+    "str d12, [x14]\n"
+    "mov v1.16b, v17.16b\n"
+    "fsub v20.4s, v20.4s, v3.4s\n"
+    "mov v17.16b, v18.16b\n"
+    "str d19, [x14, %[output_col_stride1]]\n"
+    "fmls v20.4s, v21.4s, v0.s[1]\n"
+    "fsub v16.4s, v16.4s, v4.4s\n"
+    "fmla v1.4s, v11.4s, v0.s[1]\n"
+    "fmls v16.4s, v22.4s, v0.s[1]\n"
+    "fmla v17.4s, v15.4s, v0.s[1]\n"
+    "str d20, [x14, x11]\n"
+    "fsub v1.4s, v1.4s, v5.4s\n"
+    "str d16, [x14, x13]\n"
+    "fmls v1.4s, v23.4s, v0.s[1]\n"
+    "fsub v17.4s, v17.4s, v6.4s\n"
+    "fmls v17.4s, v24.4s, v0.s[1]\n"
+    "str d1, [x14, x23]\n"
+    "str d17, [x14, x15]\n"
+    "add x14, x14, #8\n"
+    "ldr d2, [x27, x20]\n"
+    "mov v4.16b, v2.16b\n"
+    "ldr d17, [x27, x18]\n"
+    "mov v12.16b, v2.16b\n"
+    "ldr d18, [x27]\n"
+    "fmla v4.4s, v18.4s, v0.s[2]\n"
+    "ldr d3, [x27, x21]\n"
+    "mov v6.16b, v2.16b\n"
+    "ldr d5, [x27, x19]\n"
+    "mov v1.16b, v2.16b\n"
+    "ldr d18, [x27, %[input_col_stride1]]\n"
+    "fmls v4.4s, v17.4s, v0.s[3]\n"
+    "add x27, x27, #8\n"
+    "fmls v12.4s, v18.4s, v0.s[2]\n"
+    "sub %w[n_channels], %w[n_channels], #2\n"
+    "fmla v6.4s, v18.4s, v0.s[2]\n"
+    "fmls v1.4s, v18.4s, v0.s[1]\n"
+    "mov v2.16b, v2.16b\n"
+    "mov v3.16b, v3.16b\n"
+    "fmls v12.4s, v17.4s, v0.s[2]\n"
+    "mov v4.16b, v4.16b\n"
+    "fmls v6.4s, v17.4s, v0.s[2]\n"
+    "fsub v1.4s, v1.4s, v17.4s\n"
+    "fmla v1.4s, v5.4s, v0.s[1]\n"
+    "fmla v2.4s, v18.4s, v0.s[1]\n"
+    "fadd v12.4s, v12.4s, v5.4s\n"
+    "fmla v3.4s, v18.4s, v0.s[2]\n"
+    "fsub v6.4s, v6.4s, v5.4s\n"
+    "fmla v4.4s, v10.4s, v0.s[2]\n"
+    "fsub v2.4s, v2.4s, v17.4s\n"
+    "mov v16.16b, v12.16b\n"
+    "fmls v2.4s, v5.4s, v0.s[1]\n"
+    "fmls v3.4s, v5.4s, v0.s[3]\n"
+    "fmls v4.4s, v14.4s, v0.s[3]\n"
+    "fmla v16.4s, v9.4s, v0.s[2]\n"
+    "mov v5.16b, v6.16b\n"
+    "mov v6.16b, v1.16b\n"
+    "mov v9.16b, v2.16b\n"
+    "mov v10.16b, v3.16b\n"
+    "str d4, [x24]\n"
+    "fmls v16.4s, v13.4s, v0.s[3]\n"
+    "fmla v5.4s, v7.4s, v0.s[2]\n"
+    "fmla v6.4s, v8.4s, v0.s[2]\n"
+    "fmla v9.4s, v11.4s, v0.s[2]\n"
+    "fmla v10.4s, v15.4s, v0.s[2]\n"
+    "str d16, [x24, %[output_col_stride1]]\n"
+    "fmls v5.4s, v21.4s, v0.s[3]\n"
+    "fmls v6.4s, v22.4s, v0.s[3]\n"
+    "fmls v9.4s, v23.4s, v0.s[3]\n"
+    "fmls v10.4s, v24.4s, v0.s[3]\n"
+    "str d5, [x24, x11]\n"
+    "str d6, [x24, x13]\n"
+    "str d9, [x24, x23]\n"
+    "str d10, [x24, x15]\n"
+    "add x24, x24, #8\n"
+    "3:\n"
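+    // Label 3: process a final odd channel, if any, using 32-bit s registers.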
+    "cbz %w[n_channels], 4f\n"
+    "ldr s8, [%[inptr0], x20]\n"
+    "mov v14.16b, v8.16b\n"
+    "ldr s2, [%[inptr0], x18]\n"
+    "mov v10.16b, v8.16b\n"
+    "ldr s9, [%[inptr0]]\n"
+    "fmla v14.4s, v9.4s, v0.s[2]\n"
+    "ldr s1, [%[inptr0], x21]\n"
+    "mov v9.16b, v8.16b\n"
+    "ldr s4, [%[inptr0], x19]\n"
+    "mov v7.16b, v8.16b\n"
+    "ldr s12, [%[inptr0], %[input_col_stride1]]\n"
+    "fmls v14.4s, v2.4s, v0.s[3]\n"
+    "ldr s5, [x16, x20]\n"
+    "fmls v10.4s, v12.4s, v0.s[2]\n"
+    "ldr s20, [x16, x18]\n"
+    "fmla v9.4s, v12.4s, v0.s[2]\n"
+    "ldr s3, [x16]\n"
+    "fmls v7.4s, v12.4s, v0.s[1]\n"
+    "ldr s6, [x16, x21]\n"
+    "fmls v10.4s, v2.4s, v0.s[2]\n"
+    "ldr s16, [x16, x19]\n"
+    "fmls v9.4s, v2.4s, v0.s[2]\n"
+    "ldr s22, [x16, %[input_col_stride1]]\n"
+    "fsub v7.4s, v7.4s, v2.4s\n"
+    "ldr s17, [x17, x20]\n"
+    "fadd v10.4s, v10.4s, v4.4s\n"
+    "ldr s15, [x17, x18]\n"
+    "fsub v9.4s, v9.4s, v4.4s\n"
+    "ldr s19, [x17]\n"
+    "fmla v7.4s, v4.4s, v0.s[1]\n"
+    "ldr s18, [x17, x21]\n"
+    "mov v8.16b, v8.16b\n"
+    "ldr s13, [x17, x19]\n"
+    "mov v11.16b, v1.16b\n"
+    "ldr s21, [x17, %[input_col_stride1]]\n"
+    "fmla v8.4s, v12.4s, v0.s[1]\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v11.4s, v12.4s, v0.s[2]\n"
+    "add x16, x16, #4\n"
+    "mov v1.16b, v5.16b\n"
+    "add x17, x17, #4\n"
+    "fsub v8.4s, v8.4s, v2.4s\n"
+    "mov v2.16b, v5.16b\n"
+    "fmls v8.4s, v4.4s, v0.s[1]\n"
+    "fmls v11.4s, v4.4s, v0.s[3]\n"
+    "fmla v1.4s, v3.4s, v0.s[2]\n"
+    "fmls v2.4s, v22.4s, v0.s[2]\n"
+    "mov v3.16b, v5.16b\n"
+    "mov v4.16b, v5.16b\n"
+    "mov v5.16b, v5.16b\n"
+    "mov v6.16b, v6.16b\n"
+    "fmls v1.4s, v20.4s, v0.s[3]\n"
+    "fmls v2.4s, v20.4s, v0.s[2]\n"
+    "fmla v3.4s, v22.4s, v0.s[2]\n"
+    "fmls v4.4s, v22.4s, v0.s[1]\n"
+    "fmla v5.4s, v22.4s, v0.s[1]\n"
+    "fmla v6.4s, v22.4s, v0.s[2]\n"
+    "fadd v2.4s, v2.4s, v16.4s\n"
+    "mov v12.16b, v17.16b\n"
+    "fmls v3.4s, v20.4s, v0.s[2]\n"
+    "fsub v4.4s, v4.4s, v20.4s\n"
+    "fmla v4.4s, v16.4s, v0.s[1]\n"
+    "fsub v5.4s, v5.4s, v20.4s\n"
+    "fmls v5.4s, v16.4s, v0.s[1]\n"
+    "fmls v6.4s, v16.4s, v0.s[3]\n"
+    "fsub v3.4s, v3.4s, v16.4s\n"
+    "fmla v12.4s, v19.4s, v0.s[2]\n"
+    "mov v19.16b, v17.16b\n"
+    "mov v20.16b, v17.16b\n"
+    "mov v16.16b, v17.16b\n"
+    "mov v17.16b, v17.16b\n"
+    "fmls v12.4s, v15.4s, v0.s[3]\n"
+    "fmls v19.4s, v21.4s, v0.s[2]\n"
+    "fmla v20.4s, v21.4s, v0.s[2]\n"
+    "fmls v16.4s, v21.4s, v0.s[1]\n"
+    "fmla v17.4s, v21.4s, v0.s[1]\n"
+    "mov v18.16b, v18.16b\n"
+    "fmls v19.4s, v15.4s, v0.s[2]\n"
+    "mov v23.16b, v12.16b\n"
+    "fmls v20.4s, v15.4s, v0.s[2]\n"
+    "fsub v16.4s, v16.4s, v15.4s\n"
+    "fmla v16.4s, v13.4s, v0.s[1]\n"
+    "fsub v17.4s, v17.4s, v15.4s\n"
+    "fadd v19.4s, v19.4s, v13.4s\n"
+    "fmls v17.4s, v13.4s, v0.s[1]\n"
+    "fsub v20.4s, v20.4s, v13.4s\n"
+    "fmla v18.4s, v21.4s, v0.s[2]\n"
+    "fmla v23.4s, v14.4s, v0.s[2]\n"
+    "mov v15.16b, v19.16b\n"
+    "mov v14.16b, v20.16b\n"
+    "mov v24.16b, v16.16b\n"
+    "fmls v18.4s, v13.4s, v0.s[3]\n"
+    "fmla v15.4s, v10.4s, v0.s[2]\n"
+    "fmls v23.4s, v1.4s, v0.s[3]\n"
+    "fmla v14.4s, v9.4s, v0.s[2]\n"
+    "fmla v24.4s, v7.4s, v0.s[2]\n"
+    "mov v10.16b, v17.16b\n"
+    "fmls v15.4s, v2.4s, v0.s[3]\n"
+    "mov v7.16b, v18.16b\n"
+    "str s23, [%[outptr0]]\n"
+    "fmls v14.4s, v3.4s, v0.s[3]\n"
+    "fmls v24.4s, v4.4s, v0.s[3]\n"
+    "fmla v10.4s, v8.4s, v0.s[2]\n"
+    "str s15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v7.4s, v11.4s, v0.s[2]\n"
+    "str s14, [%[outptr0], x11]\n"
+    "fmls v10.4s, v5.4s, v0.s[3]\n"
+    "str s24, [%[outptr0], x13]\n"
+    "fmls v7.4s, v6.4s, v0.s[3]\n"
+    "str s10, [%[outptr0], x23]\n"
+    "str s7, [%[outptr0], x15]\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "mov v26.16b, v12.16b\n"
+    "mov v25.16b, v19.16b\n"
+    "ldr s11, [x25, x20]\n"
+    "mov v10.16b, v11.16b\n"
+    "ldr s23, [x25, x18]\n"
+    "mov v9.16b, v11.16b\n"
+    "ldr s7, [x25]\n"
+    "fmla v10.4s, v7.4s, v0.s[2]\n"
+    "ldr s13, [x25, x21]\n"
+    "mov v7.16b, v11.16b\n"
+    "ldr s31, [x25, x19]\n"
+    "mov v8.16b, v11.16b\n"
+    "ldr s21, [x25, %[input_col_stride1]]\n"
+    "fmls v10.4s, v23.4s, v0.s[3]\n"
+    "ldr s30, [x26, x20]\n"
+    "fmls v9.4s, v21.4s, v0.s[2]\n"
+    "ldr s29, [x26, x18]\n"
+    "fmla v7.4s, v21.4s, v0.s[2]\n"
+    "ldr s22, [x26]\n"
+    "fmls v8.4s, v21.4s, v0.s[1]\n"
+    "ldr s24, [x26, x21]\n"
+    "fmls v9.4s, v23.4s, v0.s[2]\n"
+    "ldr s27, [x26, x19]\n"
+    "fmls v7.4s, v23.4s, v0.s[2]\n"
+    "ldr s28, [x26, %[input_col_stride1]]\n"
+    "fsub v8.4s, v8.4s, v23.4s\n"
+    "add x25, x25, #4\n"
+    "fadd v9.4s, v9.4s, v31.4s\n"
+    "add x26, x26, #4\n"
+    "fsub v7.4s, v7.4s, v31.4s\n"
+    "fmla v8.4s, v31.4s, v0.s[1]\n"
+    "mov v11.16b, v11.16b\n"
+    "mov v15.16b, v13.16b\n"
+    "mov v14.16b, v30.16b\n"
+    "mov v13.16b, v30.16b\n"
+    "fmla v11.4s, v21.4s, v0.s[1]\n"
+    "fmla v15.4s, v21.4s, v0.s[2]\n"
+    "fmla v14.4s, v22.4s, v0.s[2]\n"
+    "fmls v13.4s, v28.4s, v0.s[2]\n"
+    "mov v21.16b, v30.16b\n"
+    "mov v22.16b, v30.16b\n"
+    "fsub v11.4s, v11.4s, v23.4s\n"
+    "fmls v15.4s, v31.4s, v0.s[3]\n"
+    "fmls v11.4s, v31.4s, v0.s[1]\n"
+    "fmls v14.4s, v29.4s, v0.s[3]\n"
+    "fmls v13.4s, v29.4s, v0.s[2]\n"
+    "fmla v21.4s, v28.4s, v0.s[2]\n"
+    "fmls v22.4s, v28.4s, v0.s[1]\n"
+    "mov v23.16b, v30.16b\n"
+    "mov v24.16b, v24.16b\n"
+    "fmls v26.4s, v10.4s, v0.s[2]\n"
+    "fadd v13.4s, v13.4s, v27.4s\n"
+    "fmls v21.4s, v29.4s, v0.s[2]\n"
+    "fsub v22.4s, v22.4s, v29.4s\n"
+    "fmla v23.4s, v28.4s, v0.s[1]\n"
+    "fmla v22.4s, v27.4s, v0.s[1]\n"
+    "fmla v24.4s, v28.4s, v0.s[2]\n"
+    "fsub v21.4s, v21.4s, v27.4s\n"
+    "fmls v26.4s, v1.4s, v0.s[2]\n"
+    "fsub v23.4s, v23.4s, v29.4s\n"
+    "fmls v25.4s, v9.4s, v0.s[2]\n"
+    "fmls v23.4s, v27.4s, v0.s[1]\n"
+    "fmls v24.4s, v27.4s, v0.s[3]\n"
+    "fadd v26.4s, v26.4s, v14.4s\n"
+    "mov v27.16b, v20.16b\n"
+    "str s26, [x28]\n"
+    "fmls v25.4s, v2.4s, v0.s[2]\n"
+    "fmls v27.4s, v7.4s, v0.s[2]\n"
+    "mov v31.16b, v16.16b\n"
+    "mov v30.16b, v17.16b\n"
+    "mov v29.16b, v18.16b\n"
+    "fadd v25.4s, v25.4s, v13.4s\n"
+    "fmls v31.4s, v8.4s, v0.s[2]\n"
+    "str s25, [x28, %[output_col_stride1]]\n"
+    "fmls v27.4s, v3.4s, v0.s[2]\n"
+    "fmls v30.4s, v11.4s, v0.s[2]\n"
+    "fmls v29.4s, v15.4s, v0.s[2]\n"
+    "fmls v31.4s, v4.4s, v0.s[2]\n"
+    "mov v26.16b, v12.16b\n"
+    "fadd v27.4s, v27.4s, v21.4s\n"
+    "mov v25.16b, v19.16b\n"
+    "str s27, [x28, x11]\n"
+    "fmls v30.4s, v5.4s, v0.s[2]\n"
+    "fadd v31.4s, v31.4s, v22.4s\n"
+    "fmls v29.4s, v6.4s, v0.s[2]\n"
+    "str s31, [x28, x13]\n"
+    "fmla v26.4s, v10.4s, v0.s[2]\n"
+    "fadd v30.4s, v30.4s, v23.4s\n"
+    "fmla v25.4s, v9.4s, v0.s[2]\n"
+    "str s30, [x28, x23]\n"
+    "fadd v29.4s, v29.4s, v24.4s\n"
+    "str s29, [x28, x15]\n"
+    "fmls v26.4s, v1.4s, v0.s[2]\n"
+    "fmls v25.4s, v2.4s, v0.s[2]\n"
+    "add x28, x28, #4\n"
+    "mov v30.16b, v20.16b\n"
+    "mov v29.16b, v16.16b\n"
+    "fsub v26.4s, v26.4s, v14.4s\n"
+    "mov v28.16b, v17.16b\n"
+    "str s26, [x22]\n"
+    "fsub v25.4s, v25.4s, v13.4s\n"
+    "str s25, [x22, %[output_col_stride1]]\n"
+    "fmla v30.4s, v7.4s, v0.s[2]\n"
+    "fmla v29.4s, v8.4s, v0.s[2]\n"
+    "fmla v28.4s, v11.4s, v0.s[2]\n"
+    "mov v26.16b, v18.16b\n"
+    "mov v25.16b, v12.16b\n"
+    "fmls v30.4s, v3.4s, v0.s[2]\n"
+    "mov v31.16b, v19.16b\n"
+    "fmls v29.4s, v4.4s, v0.s[2]\n"
+    "fmls v28.4s, v5.4s, v0.s[2]\n"
+    "fmla v26.4s, v15.4s, v0.s[2]\n"
+    "fmls v25.4s, v10.4s, v0.s[1]\n"
+    "fsub v30.4s, v30.4s, v21.4s\n"
+    "fmls v31.4s, v9.4s, v0.s[1]\n"
+    "str s30, [x22, x11]\n"
+    "fsub v29.4s, v29.4s, v22.4s\n"
+    "str s29, [x22, x13]\n"
+    "fsub v28.4s, v28.4s, v23.4s\n"
+    "str s28, [x22, x23]\n"
+    "fmls v26.4s, v6.4s, v0.s[2]\n"
+    "fsub v25.4s, v25.4s, v1.4s\n"
+    "fsub v31.4s, v31.4s, v2.4s\n"
+    "fmla v25.4s, v14.4s, v0.s[1]\n"
+    "fmla v31.4s, v13.4s, v0.s[1]\n"
+    "fsub v26.4s, v26.4s, v24.4s\n"
+    "mov v27.16b, v20.16b\n"
+    "str s26, [x22, x15]\n"
+    "mov v26.16b, v16.16b\n"
+    "str s25, [x12]\n"
+    "fmls v27.4s, v7.4s, v0.s[1]\n"
+    "str s31, [x12, %[output_col_stride1]]\n"
+    "fmls v26.4s, v8.4s, v0.s[1]\n"
+    "mov v25.16b, v17.16b\n"
+    "add x22, x22, #4\n"
+    "fsub v27.4s, v27.4s, v3.4s\n"
+    "mov v28.16b, v18.16b\n"
+    "fmla v27.4s, v21.4s, v0.s[1]\n"
+    "fsub v26.4s, v26.4s, v4.4s\n"
+    "fmla v26.4s, v22.4s, v0.s[1]\n"
+    "fmls v25.4s, v11.4s, v0.s[1]\n"
+    "fmls v28.4s, v15.4s, v0.s[1]\n"
+    "mov v12.16b, v12.16b\n"
+    "str s27, [x12, x11]\n"
+    "mov v19.16b, v19.16b\n"
+    "str s26, [x12, x13]\n"
+    "fsub v25.4s, v25.4s, v5.4s\n"
+    "fmla v25.4s, v23.4s, v0.s[1]\n"
+    "fsub v28.4s, v28.4s, v6.4s\n"
+    "fmla v28.4s, v24.4s, v0.s[1]\n"
+    "fmla v12.4s, v10.4s, v0.s[1]\n"
+    "fmla v19.4s, v9.4s, v0.s[1]\n"
+    "mov v20.16b, v20.16b\n"
+    "str s25, [x12, x23]\n"
+    "mov v16.16b, v16.16b\n"
+    "str s28, [x12, x15]\n"
+    "fsub v12.4s, v12.4s, v1.4s\n"
+    "fmls v12.4s, v14.4s, v0.s[1]\n"
+    "add x12, x12, #4\n"
+    "fsub v19.4s, v19.4s, v2.4s\n"
+    "fmla v20.4s, v7.4s, v0.s[1]\n"
+    "fmls v19.4s, v13.4s, v0.s[1]\n"
+    "fmla v16.4s, v8.4s, v0.s[1]\n"
+    "str s12, [x14]\n"
+    "mov v1.16b, v17.16b\n"
+    "fsub v20.4s, v20.4s, v3.4s\n"
+    "mov v17.16b, v18.16b\n"
+    "str s19, [x14, %[output_col_stride1]]\n"
+    "fmls v20.4s, v21.4s, v0.s[1]\n"
+    "fsub v16.4s, v16.4s, v4.4s\n"
+    "fmla v1.4s, v11.4s, v0.s[1]\n"
+    "fmls v16.4s, v22.4s, v0.s[1]\n"
+    "fmla v17.4s, v15.4s, v0.s[1]\n"
+    "str s20, [x14, x11]\n"
+    "fsub v1.4s, v1.4s, v5.4s\n"
+    "str s16, [x14, x13]\n"
+    "fmls v1.4s, v23.4s, v0.s[1]\n"
+    "fsub v17.4s, v17.4s, v6.4s\n"
+    "fmls v17.4s, v24.4s, v0.s[1]\n"
+    "str s1, [x14, x23]\n"
+    "str s17, [x14, x15]\n"
+    "add x14, x14, #4\n"
+    "ldr s2, [x27, x20]\n"
+    "mov v4.16b, v2.16b\n"
+    "ldr s17, [x27, x18]\n"
+    "mov v12.16b, v2.16b\n"
+    "ldr s18, [x27]\n"
+    "fmla v4.4s, v18.4s, v0.s[2]\n"
+    "ldr s3, [x27, x21]\n"
+    "mov v6.16b, v2.16b\n"
+    "ldr s5, [x27, x19]\n"
+    "mov v1.16b, v2.16b\n"
+    "ldr s18, [x27, %[input_col_stride1]]\n"
+    "fmls v4.4s, v17.4s, v0.s[3]\n"
+    "add x27, x27, #4\n"
+    "fmls v12.4s, v18.4s, v0.s[2]\n"
+    "fmla v6.4s, v18.4s, v0.s[2]\n"
+    "fmls v1.4s, v18.4s, v0.s[1]\n"
+    "mov v2.16b, v2.16b\n"
+    "mov v3.16b, v3.16b\n"
+    "mov v4.16b, v4.16b\n"
+    "fmls v12.4s, v17.4s, v0.s[2]\n"
+    "fmls v6.4s, v17.4s, v0.s[2]\n"
+    "fsub v1.4s, v1.4s, v17.4s\n"
+    "fmla v2.4s, v18.4s, v0.s[1]\n"
+    "fmla v1.4s, v5.4s, v0.s[1]\n"
+    "fmla v3.4s, v18.4s, v0.s[2]\n"
+    "fadd v12.4s, v12.4s, v5.4s\n"
+    "fsub v6.4s, v6.4s, v5.4s\n"
+    "fsub v2.4s, v2.4s, v17.4s\n"
+    "fmla v4.4s, v10.4s, v0.s[2]\n"
+    "fmls v2.4s, v5.4s, v0.s[1]\n"
+    "fmls v3.4s, v5.4s, v0.s[3]\n"
+    "mov v16.16b, v12.16b\n"
+    "mov v5.16b, v6.16b\n"
+    "fmls v4.4s, v14.4s, v0.s[3]\n"
+    "mov v6.16b, v1.16b\n"
+    "fmla v16.4s, v9.4s, v0.s[2]\n"
+    "fmla v5.4s, v7.4s, v0.s[2]\n"
+    "fmla v6.4s, v8.4s, v0.s[2]\n"
+    "mov v9.16b, v2.16b\n"
+    "str s4, [x24]\n"
+    "mov v10.16b, v3.16b\n"
+    "fmls v16.4s, v13.4s, v0.s[3]\n"
+    "fmls v5.4s, v21.4s, v0.s[3]\n"
+    "fmls v6.4s, v22.4s, v0.s[3]\n"
+    "fmla v9.4s, v11.4s, v0.s[2]\n"
+    "fmla v10.4s, v15.4s, v0.s[2]\n"
+    "str s16, [x24, %[output_col_stride1]]\n"
+    "str s5, [x24, x11]\n"
+    "fmls v9.4s, v23.4s, v0.s[3]\n"
+    "str s6, [x24, x13]\n"
+    "fmls v10.4s, v24.4s, v0.s[3]\n"
+    "str s9, [x24, x23]\n"
+    "str s10, [x24, x15]\n"
+    "add x24, x24, #4\n"
+    "4:\n"
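+    // Operand bindings: the output row stride is 6 * matrix_stride floats,
+    // so each of the 36 transformed values lands in a different matrix.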
+    : [outptr0] "+r" (matrix_base),
+      [n_channels] "+r" (n_channels),
+      [inptr0] "+r" (input_base)
+    : [pcoeffs] "r" (pcoeffs),
+      [output_row_stride] "r" (6 * matrix_stride * sizeof(float)),
+      [output_col_stride1] "r" (matrix_stride * sizeof(float)),
+      [input_row_stride] "r" (input_row_stride * sizeof(float)),
+      [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
+      "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
+      "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8",
+      "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
+      "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
+
+#else  // __arm__ (not __aarch64__)
+
+template <>
+void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+  const int n_channels,
+  const float* const input_base,
+  const int input_row_stride,
+  const int input_col_stride,
+  float* outptr,
+  const int matrix_stride
+)
+{
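+  // Fallback for 32-bit Arm: transform pairs of channels with 64-bit NEON
+  // intrinsics, then finish any odd channel with plain scalar code.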
+  constexpr int inner_tile_rows = 6;
+  constexpr int inner_tile_cols = 6;
+
+  // Get pointers into the input tile
+  const float *x_ptrs[inner_tile_rows][inner_tile_cols];
+  for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
+  {
+    // Get a pointer into the row
+    const float* const row_ptr = input_base + xi*input_row_stride;
+
+    for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
+    {
+      x_ptrs[i][j] = row_ptr + xj*input_col_stride;
+    }
+  }
+
+  // Matrices used/computed in this kernel.
+  float x[inner_tile_rows][inner_tile_cols];
+  float XTx[inner_tile_rows][inner_tile_cols];
+  float U[inner_tile_rows][inner_tile_cols];
+  for (int i = 0; i < inner_tile_rows; i++)
+  {
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      x[i][j] = XTx[i][j] = 0.0f;
+    }
+  }
+
+  // Perform the Winograd input transformation for each channel in the input
+  // tensor.
+  int channels_remaining = n_channels;
+  for (; channels_remaining >= 2; channels_remaining -= 2)
+  {
+    // Matrices used/computed in this kernel
+    float32x2_t x[inner_tile_rows][inner_tile_cols];
+    float32x2_t XTx[inner_tile_rows][inner_tile_cols];
+    float32x2_t U[inner_tile_rows][inner_tile_cols];
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = vdup_n_f32(0.0f);
+        XTx[i][j] = vdup_n_f32(0.0f);
+      }
+    }
+
+    // Read a 6x6 input tile, two channels at a time
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = vld1_f32(x_ptrs[i][j]);
+        x_ptrs[i][j] += 2;
+      }
+    }
+
+    // Compute XT . x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      // XTx[0][j] =  4*x[0][j] + -5*x[2][j] +  1*x[4][j];
+      XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);
+
+      // XTx[1][j] = -4*x[1][j] + -4*x[2][j] +  1*x[3][j] +  1*x[4][j];
+      XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);
+
+      // XTx[2][j] =  4*x[1][j] + -4*x[2][j] + -1*x[3][j] +  1*x[4][j];
+      XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);
+
+      // XTx[3][j] = -2*x[1][j] + -1*x[2][j] +  2*x[3][j] +  1*x[4][j];
+      XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);
+
+      // XTx[4][j] =  2*x[1][j] + -1*x[2][j] + -2*x[3][j] +  1*x[4][j];
+      XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);
+
+      // XTx[5][j] =  4*x[1][j] + -5*x[3][j] +  1*x[5][j];
+      XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
+    }
+
+    // Compute U = XT . x . X
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      // U[i][0] =  4*XTx[i][0] + -5*XTx[i][2] +  1*XTx[i][4];
+      U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);
+
+      // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] +  1*XTx[i][3] +  1*XTx[i][4];
+      U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);
+
+      // U[i][2] =  4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] +  1*XTx[i][4];
+      U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);
+
+      // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] +  2*XTx[i][3] +  1*XTx[i][4];
+      U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);
+
+      // U[i][4] =  2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] +  1*XTx[i][4];
+      U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);
+
+      // U[i][5] =  4*XTx[i][1] + -5*XTx[i][3] +  1*XTx[i][5];
+      U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
+    }
+
+    // Store the transformed matrix
+    for (int i = 0, m = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++, m++)
+      {
+        vst1_f32(outptr + m*matrix_stride, U[i][j]);
+      }
+    }
+    outptr += 2;
+  }
+  for (; channels_remaining; channels_remaining--)
+  {
+    // Load x
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        x[i][j] = *(x_ptrs[i][j]++);
+      }
+    }
+
+    // Compute XT . x
+    for (int j = 0; j < inner_tile_cols; j++)
+    {
+      XTx[0][j] =  4*x[0][j] + -5*x[2][j] +  1*x[4][j];
+      XTx[1][j] = -4*x[1][j] + -4*x[2][j] +  1*x[3][j] +  1*x[4][j];
+      XTx[2][j] =  4*x[1][j] + -4*x[2][j] + -1*x[3][j] +  1*x[4][j];
+      XTx[3][j] = -2*x[1][j] + -1*x[2][j] +  2*x[3][j] +  1*x[4][j];
+      XTx[4][j] =  2*x[1][j] + -1*x[2][j] + -2*x[3][j] +  1*x[4][j];
+      XTx[5][j] =  4*x[1][j] + -5*x[3][j] +  1*x[5][j];
+    }
+
+    // Compute U = XT . x . X
+    for (int i = 0; i < inner_tile_rows; i++)
+    {
+      U[i][0] =  4*XTx[i][0] + -5*XTx[i][2] +  1*XTx[i][4];
+      U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] +  1*XTx[i][3] +  1*XTx[i][4];
+      U[i][2] =  4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] +  1*XTx[i][4];
+      U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] +  2*XTx[i][3] +  1*XTx[i][4];
+      U[i][4] =  2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] +  1*XTx[i][4];
+      U[i][5] =  4*XTx[i][1] + -5*XTx[i][3] +  1*XTx[i][5];
+    }
+
+    // Store the transformed matrix
+    for (int i = 0, m = 0; i < inner_tile_rows; i++)
+    {
+      for (int j = 0; j < inner_tile_cols; j++, m++)
+      {
+        *(outptr + m*matrix_stride) = U[i][j];
+      }
+    }
+    outptr++;
+  }
+}
+
+#endif  // __aarch64__
+
+template class InputTransform<6, 6, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp
new file mode 100644
index 0000000..e45f186
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include "winograd.hpp"
+using namespace winograd;
+
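+// MEMBERFN(RTYPE) expands to the template header and class qualifier needed
+// for an out-of-line member definition of WeightTransform, so each definition
+// below reads as MEMBERFN(ReturnType)::member(...).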
+#define MEMBERFN(RTYPE) template <\
+  int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE WeightTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots>
+
+MEMBERFN()::WeightTransform(
+  const int n_output_channels,
+  const int n_input_channels
+) : _n_output_channels(n_output_channels), _n_input_channels(n_input_channels),
+    _matrices(nullptr), _matrix_stride(0), _matrix_row_stride(0), _weights(nullptr)
+{
+
+}
+
+MEMBERFN(void)::set_weight_tensor(const void * const weights)
+{
+  _weights = static_cast<const TIn *>(weights);
+}
+
+MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow)
+{
+  _matrices = static_cast<TOut *>(mptr);
+  _matrix_stride = ldmatrix;
+  _matrix_row_stride = ldrow;
+}
+
+MEMBERFN(size_t)::get_working_space_size(unsigned int) const
+{
+  return 0;
+}
+
+MEMBERFN(void)::set_working_space(void *)
+{
+}
+
+MEMBERFN(unsigned int)::get_window(void) const
+{
+  // TODO When the weights transform supports multithreading, return the number
+  // of output channels. For now we return 1 to indicate that the weights must
+  // be transformed as a single block.
+  // return n_output_channels;
+  return 1;
+}
+
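+// The window parameters are ignored while get_window() returns 1: the whole
+// weight tensor is transformed in a single call.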
+MEMBERFN(void)::run(const unsigned int, const unsigned int, unsigned int)
+{
+  execute(
+    _n_output_channels, _n_input_channels, _weights,
+    _matrices, _matrix_stride, _matrix_row_stride
+  );
+}
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp
new file mode 100644
index 0000000..d97af21
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include "winograd.hpp"
+#include "padding.hpp"
+#include "utils.hpp"
+
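+// MEMBERFN and Nx1MEMBERFN expand to the template headers for out-of-line
+// member definitions of the general output transform and its Nx1
+// (single-column) partial specialisation respectively.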
+#define MEMBERFN(RTYPE) template<\
+  int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols,\
+  typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots>
+
+#define Nx1MEMBERFN(RTYPE) template<\
+  int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\
+> RTYPE OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots>
+
+namespace winograd
+{
+
+MEMBERFN()::OutputTransform(
+  const int n_batches,
+  const int n_rows,
+  const int n_cols,
+  const int n_channels
+) : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels),
+    _matrix_base(nullptr),
+    _biases(nullptr),
+    _matrix_stride(0), _matrix_row_stride(0), _matrix_batch_stride(0),
+    _outptr(nullptr),
+    _tiles_M(iceildiv(n_rows, output_tile_rows)),
+    _tiles_N(iceildiv(n_cols, output_tile_cols)),
+    _out_col_stride(0), _out_row_stride(0), _out_batch_stride(0),
+    _working_space_col_stride(n_channels),
+    _working_space_row_stride(output_tile_cols * _working_space_col_stride),
+    _working_space(nullptr)
+{
+}
+
+MEMBERFN(void)::set_input_matrices(const void * const mptr, const int ldmatrix, const int ldrow)
+{
+  _matrix_base = static_cast<const TIn *>(mptr);
+  _matrix_stride = ldmatrix;
+  _matrix_row_stride = ldrow;
+  _matrix_batch_stride = _tiles_M * _tiles_N * ldrow;
+}
+
+MEMBERFN(void)::set_bias(const void * const bias)
+{
+  _biases = static_cast<const TOut *>(bias);
+}
+
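+// The set_output_tensor overloads below default any omitted strides to a
+// densely packed (NHWC-style) layout, each delegating to the
+// next-most-specific overload.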
+MEMBERFN(void)::set_output_tensor(void * const outptr)
+{
+  set_output_tensor(outptr, _n_channels);
+}
+
+MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol)
+{
+  set_output_tensor(outptr, _n_cols * ldcol, ldcol);
+}
+
+MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol)
+{
+  set_output_tensor(outptr, _n_rows * ldrow, ldrow, ldcol);
+}
+
+MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+  _outptr = static_cast<TOut *>(outptr);
+  _out_batch_stride = ldbatch;
+  _out_row_stride = ldrow;
+  _out_col_stride = ldcol;
+}
+
+Nx1MEMBERFN()::OutputTransform(
+  const int n_batches,
+  const int n_rows,
+  const int n_cols,
+  const int n_channels
+) : OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::OutputTransform(
+    n_batches, n_cols, n_rows, n_channels /* Transpose rows and columns */
+  )
+{
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr)
+{
+  set_output_tensor(outptr, this->_n_channels);
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol)
+{
+  set_output_tensor(outptr, this->_n_cols * ldcol, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol)
+{
+  set_output_tensor(outptr, this->_n_rows * ldrow, ldrow, ldcol);
+}
+
+Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol)
+{
+  // Transpose rows and columns
+  Base::set_output_tensor(outptr, ldbatch, ldcol, ldrow);
+}
+
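+// Each thread needs scratch space for one complete output tile; cropped tiles
+// are transformed into this buffer and then copied out (see
+// transform_cropped_tile below).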
+MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const
+{
+  return sizeof(TOut) * output_tile_rows * _working_space_row_stride * nthreads;
+}
+
+MEMBERFN(void)::set_working_space(void * const buffer)
+{
+  _working_space = static_cast<TOut *>(buffer);
+}
+
+MEMBERFN(unsigned int)::get_window(void) const
+{
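+  // The scheduling window is measured in blocks of WINDOW_BLOCK channels;
+  // run() converts window indices back into channel ranges.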
+  return iceildiv(_n_channels, WINDOW_BLOCK);
+}
+
+MEMBERFN(void)::run(
+  const unsigned int start,
+  const unsigned int stop,
+  const unsigned int threadid
+)
+{
+  // Determine the channels on which to work
+  if (start >= get_window())
+  {
+    return;  // No work to do beyond the end of the window
+  }
+  const unsigned int start_channel = start * WINDOW_BLOCK;
+  const unsigned int stop_channel = std::min<unsigned int>(_n_channels, stop * WINDOW_BLOCK);
+  const unsigned int n_channels = stop_channel - start_channel;
+
+  const auto matrix_tile_col_stride = _matrix_row_stride;
+  const auto matrix_tile_row_stride = _tiles_N * matrix_tile_col_stride;
+
+  const TOut* const bptr = (_biases == nullptr) ? nullptr : _biases + start_channel;
+
+  // Loop over batches
+  for (int batch = 0; batch < _n_batches; batch++)
+  {
+    const TIn* const matrix_batch = _matrix_base + start_channel + batch * _matrix_batch_stride;
+    TOut* const outptr_batch = _outptr + start_channel + batch * _out_batch_stride;
+
+    for (int tile_i = 0; tile_i < _tiles_M; tile_i++)
+    {
+      // Compute properties of the row of output tiles
+      const int row_pad_bottom = std::max(0, (tile_i + 1)*output_tile_rows - _n_rows);
+      const TIn* const matrix_tile_row = matrix_batch + tile_i * matrix_tile_row_stride;
+      TOut* const outptr_row = outptr_batch + tile_i * output_tile_rows * _out_row_stride;
+
+      for (int tile_j = 0; tile_j < _tiles_N; tile_j++)
+      {
+        // Compute properties of this specific tile
+        const int tile_pad_right = std::max(0, (tile_j + 1)*output_tile_cols - _n_cols);
+        const TIn* const matrix_tile = matrix_tile_row + tile_j * matrix_tile_col_stride;
+        TOut* const outptr_tile = outptr_row + tile_j * output_tile_cols * _out_col_stride;
+
+        // Perform the transformation
+        if (row_pad_bottom || tile_pad_right)
+        {
+          transform_cropped_tile(
+            threadid, n_channels, outptr_tile, matrix_tile, bptr,
+            row_pad_bottom, tile_pad_right
+          );
+        }
+        else
+        {
+          transform_uncropped_tile(
+            threadid, n_channels, outptr_tile, matrix_tile, bptr
+          );
+        }
+      }
+    }
+  }
+}
+
+MEMBERFN(void)::transform_uncropped_tile(
+  const unsigned int /* threadid unused */,
+  const int n_channels,
+  TOut * const outptr,
+  const TIn * const inptr,
+  const TOut * const biases
+)
+{
+  transform_tile(
+    n_channels, inptr, _matrix_stride, biases,
+    outptr, _out_row_stride, _out_col_stride
+  );
+}
+
+MEMBERFN(void)::transform_cropped_tile(
+  const unsigned int threadid,
+  const int n_channels,
+  TOut * const outptr,
+  const TIn * const inptr,
+  const TOut * const biases,
+  const int pad_bottom,
+  const int pad_right
+)
+{
+  // Transform into working space and then copy the relevant section out.
+  TOut *wsptr = static_cast<TOut *>(get_working_space(threadid));
+  transform_tile(
+    n_channels, inptr, _matrix_stride, biases,
+    wsptr, _working_space_row_stride, _working_space_col_stride
+  );
+
+  padding::crop_and_copy_tile(
+    output_tile_rows, output_tile_cols, n_channels,
+    wsptr, _working_space_row_stride, _working_space_col_stride,
+    outptr, _out_row_stride, _out_col_stride,
+    0u, 0u, pad_bottom, pad_right
+  );
+}
+
+MEMBERFN(void *)::get_working_space(const unsigned int threadid) const
+{
+  return _working_space + output_tile_rows * _working_space_row_stride * threadid;
+}
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
similarity index 71%
rename from src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
rename to src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
index ea842a4..c32d7f2 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,43 +22,29 @@
  * SOFTWARE.
  */
 
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm.hpp"
+#include "output.hpp"
 
-namespace
+namespace winograd
 {
 
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_2_7_fp32_process_tile(
+template <>
+void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
   const int n_channels,
-  const float* const matrix_base,
+  const float* inptr,
   const int matrix_stride,
-  const float* const biases,
+  const float* bptr,
   float* const output,
-  const int output_row_stride,
-  const int output_col_stride,
-  const int _pad_bottom,
-  const int _pad_right
+  const int,  // No need to stride across rows
+  const int output_col_stride
 )
 {
-  (void) output_row_stride;
-  (void) _pad_bottom;
-  constexpr int output_tile_cols = 2;
-  constexpr int inner_tile_cols = 8;
-
-  const int pad_right = Specialized ? PadRight : _pad_right;
-  const int cells_j = output_tile_cols - pad_right;
-
-
   // Construct a map to the output cells
-  float *outptrs[cells_j];
-  for (int j = 0; j < cells_j; j++)
+  float *outptrs[output_tile_cols];
+  for (int j = 0; j < output_tile_cols; j++)
   {
     outptrs[j] = output + j*output_col_stride;
   }
-  const float *inptr = matrix_base;
-  const float *bptr = biases;
 
   // For each channel of the output
   int channels_remaining = n_channels;
@@ -84,7 +70,7 @@
       b = vld1q_f32(bptr);
       bptr += 4;
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       vst1q_f32(outptrs[j], f[j] + b);
       outptrs[j] += 4;
@@ -111,7 +97,7 @@
       b = vld1_f32(bptr);
       bptr += 2;
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       vst1_f32(outptrs[j], f[j] + b);
       outptrs[j] += 2;
@@ -138,26 +124,14 @@
     {
       b = *(bptr++);
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       *(outptrs[j]++) = f[j] + b;
     }
   }
 }
-}  // namespace (anonymous)
 
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 7, 1, 8, float>;
+template class OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>;
+template class OutputTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>;
 
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_2_7_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_output_transform_2_7_fp32_process_tile<true, 1>
-};
-
-template class OutputTransform<1, 7, 1, 8, float>;
-template class OutputTransform<7, 1, 8, 1, float>;
 }  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..d6ebf44
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "output.hpp"
+
+namespace winograd
+{
+
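+// Output transform for F(2x2, 3x3): each 4x4 Winograd-domain tile F is
+// mapped to a 2x2 spatial tile f = ZT F Z where, as the scalar tail below
+// spells out,
+//
+//   ZT = [ 1  1   1   0 ]
+//        [ 0  1  -1  -1 ]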
+template <>
+void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transform_tile(
+  const int n_channels,
+  const float* inptr,
+  const int matrix_stride,
+  const float* bptr,
+  float* const output,
+  const int output_row_stride,
+  const int output_col_stride
+)
+{
+  // Construct a map to the output cells
+  float *outptrs[output_tile_rows][output_tile_cols];
+  for (int i = 0; i < output_tile_rows; i++)
+  {
+    for (int j = 0; j < output_tile_cols; j++)
+    {
+      outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
+    }
+  }
+
+  // For each channel of the output
+  int channels_remaining = n_channels;
+#ifdef __aarch64__
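+  // Four channels at a time with 128-bit NEON (float32x4_t) arithmetic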
+  for (; channels_remaining >= 4; channels_remaining -= 4)
+  {
+    // Matrices used and computed during this transform
+    float32x4_t F[4][4], FZ[4][2], f[2][2], b;
+
+    // Read a 4x4 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 4; i++)
+    {
+      for (int j = 0; j < 4; j++, m++)
+      {
+        F[i][j] = vld1q_f32(inptr + m*matrix_stride);
+      }
+    }
+    inptr += 4;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 4; i++)
+    {
+      // FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
+      FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]);
+
+      // FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
+      FZ[i][1] = vsubq_f32(vsubq_f32(F[i][1], F[i][2]), F[i][3]);
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 2; j++)
+    {
+      // f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
+      f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
+
+      // f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
+      f[1][j] = vsubq_f32(vsubq_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
+    }
+
+    // Load the bias vector
+    if (bptr != nullptr)
+    {
+      b = vld1q_f32(bptr);
+      bptr += 4;
+    }
+    else
+    {
+      b = vdupq_n_f32(0.0f);
+    }
+
+    // Write out the output tile
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
+        outptrs[i][j] += 4;
+      }
+    }
+  }
+#endif  // __aarch64__
+#ifdef __arm_any__
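+  // Pairs of channels with 64-bit NEON (float32x2_t) arithmetic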
+  for (; channels_remaining >= 2; channels_remaining -= 2)
+  {
+    // Matrices used and computed during this transform
+    float32x2_t F[4][4], FZ[4][2], f[2][2], b;
+
+    // Read a 4x4 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 4; i++)
+    {
+      for (int j = 0; j < 4; j++, m++)
+      {
+        F[i][j] = vld1_f32(inptr + m*matrix_stride);
+      }
+    }
+    inptr += 2;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 4; i++)
+    {
+      // FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
+      FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]);
+
+      // FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
+      FZ[i][1] = vsub_f32(vsub_f32(F[i][1], F[i][2]), F[i][3]);
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 2; j++)
+    {
+      // f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
+      f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]);
+
+      // f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
+      f[1][j] = vsub_f32(vsub_f32(FZ[1][j], FZ[2][j]), FZ[3][j]);
+    }
+
+    // Load the bias vector
+    if (bptr != nullptr)
+    {
+      b = vld1_f32(bptr);
+      bptr += 2;
+    }
+    else
+    {
+      b = vdup_n_f32(0.0f);
+    }
+
+    // Write out the output tile
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
+        outptrs[i][j] += 2;
+      }
+    }
+  }
+#endif  // __arm_any__
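+  // Scalar tail: any remaining channels, one at a time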
+  for (; channels_remaining; channels_remaining--)
+  {
+    // Matrices used and computed during this transform
+    float F[4][4], FZ[4][2], f[2][2], b;
+
+    // Read a 4x4 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 4; i++)
+    {
+      for (int j = 0; j < 4; j++, m++)
+      {
+        F[i][j] = *(inptr + m*matrix_stride);
+      }
+    }
+    inptr++;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 4; i++)
+    {
+      FZ[i][0] =  F[i][0] + F[i][1] + F[i][2];
+      FZ[i][1] =  F[i][1] - F[i][2] - F[i][3];
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 2; j++)
+    {
+      f[0][j] =  FZ[0][j] + FZ[1][j] + FZ[2][j];
+      f[1][j] =  FZ[1][j] - FZ[2][j] - FZ[3][j];
+    }
+
+    // Load the bias
+    if (bptr != nullptr)
+    {
+      b = *(bptr++);
+    }
+    else
+    {
+      b = 0.0f;
+    }
+
+    // Write out the output tile
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        *(outptrs[i][j]++) = f[i][j] + b;
+      }
+    }
+  }
+}
+
+template class OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..d93d9e2
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "output.hpp"
+#include "arm.hpp"
+
+namespace winograd
+{
+
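+// Output transform for F(2x2, 5x5): each 6x6 Winograd-domain tile F is
+// mapped to a 2x2 spatial tile f = ZT F Z where
+//
+//   ZT = [ 1  1   1  1   1  0 ]
+//        [ 0  1  -1  2  -2  1 ]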
+template <>
+void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+  const int n_channels,
+  const float* inptr,
+  const int matrix_stride,
+  const float* bptr,
+  float* const output,
+  const int output_row_stride,
+  const int output_col_stride
+)
+{
+  // Construct a map to the output cells
+  float *outptrs[output_tile_rows][output_tile_cols];
+  for (int i = 0; i < output_tile_rows; i++)
+  {
+    for (int j = 0; j < output_tile_cols; j++)
+    {
+      outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
+    }
+  }
+
+  // For each channel of the output
+  int channels_remaining = n_channels;
+#ifdef __aarch64__
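+  // Four channels at a time with 128-bit NEON (float32x4_t) arithmetic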
+  for (; channels_remaining >= 4; channels_remaining -= 4)
+  {
+    // Matrices used and computed during this transform
+    float32x4_t F[6][6], FZ[6][2], f[2][2], b;
+
+    // Read a 6x6 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 6; i++)
+    {
+      for (int j = 0; j < 6; j++, m++)
+      {
+        F[i][j] = vld1q_f32(inptr + m*matrix_stride);
+      }
+    }
+    inptr += 4;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 6; i++)
+    {
+      // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
+      FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]);
+
+      // FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
+      FZ[i][1] = vaddq_f32(vmlaq_n_f32(vsubq_f32(F[i][1], F[i][2]), vsubq_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 2; j++)
+    {
+      // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
+      f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
+
+      // f[1][j] =               1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
+      f[1][j] = vaddq_f32(vmlaq_n_f32(vsubq_f32(FZ[1][j], FZ[2][j]), vsubq_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
+    }
+
+    // Write out the output tile
+    if (bptr != nullptr)
+    {
+      b = vld1q_f32(bptr);
+      bptr += 4;
+    }
+    else
+    {
+      b = vdupq_n_f32(0.0f);
+    }
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        vst1q_f32(outptrs[i][j], vaddq_f32(f[i][j], b));
+        outptrs[i][j] += 4;
+      }
+    }
+  }
+#endif  // __aarch64__
+#ifdef __arm_any__
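+  // Pairs of channels with 64-bit NEON (float32x2_t) arithmetic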
+  for (; channels_remaining >= 2; channels_remaining -= 2)
+  {
+    // Matrices used and computed during this transform
+    float32x2_t F[6][6], FZ[6][2], f[2][2], b;
+
+    // Read a 6x6 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 6; i++)
+    {
+      for (int j = 0; j < 6; j++, m++)
+      {
+        F[i][j] = vld1_f32(inptr + m*matrix_stride);
+      }
+    }
+    inptr += 2;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 6; i++)
+    {
+      // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
+      FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
+
+      // FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
+      FZ[i][1] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f), F[i][5]);
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 2; j++)
+    {
+      // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
+      f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
+
+      // f[1][j] =               1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
+      f[1][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f), FZ[5][j]);
+    }
+
+    // Write out the output tile
+    if (bptr != nullptr)
+    {
+      b = vld1_f32(bptr);
+      bptr += 2;
+    }
+    else
+    {
+      b = vdup_n_f32(0.0f);
+    }
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
+        outptrs[i][j] += 2;
+      }
+    }
+  }
+#endif  // __arm_any__
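+  // Scalar tail: any remaining channels, one at a time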
+  for (; channels_remaining; channels_remaining--)
+  {
+    // Matrices used and computed during this transform
+    float F[6][6], FZ[6][2], f[2][2], b;
+
+    // Read a 6x6 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 6; i++)
+    {
+      for (int j = 0; j < 6; j++, m++)
+      {
+        F[i][j] = *(inptr + m*matrix_stride);
+      }
+    }
+    inptr++;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 6; i++)
+    {
+      FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
+      FZ[i][1] =               1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4] +  1*F[i][5];
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 2; j++)
+    {
+      f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
+      f[1][j] =                1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j] +  1*FZ[5][j];
+    }
+
+    // Write out the output tile
+    if (bptr != nullptr)
+    {
+      b = *(bptr++);
+    }
+    else
+    {
+      b = 0.0f;
+    }
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        *(outptrs[i][j]++) = f[i][j] + b;
+      }
+    }
+  }
+}
+
+template class OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
similarity index 73%
rename from src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
rename to src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
index 911759b..7187ef2 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,42 +22,29 @@
  * SOFTWARE.
  */
 
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "output.hpp"
+#include "arm.hpp"
 
-namespace
+namespace winograd
 {
 
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_4_5_fp32_process_tile(
+template <>
+void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
   const int n_channels,
-  const float* const matrix_base,
+  const float* inptr,
   const int matrix_stride,
-  const float* const biases,
+  const float* bptr,
   float* const output,
-  const int output_row_stride,
-  const int output_col_stride,
-  const int _pad_bottom,
-  const int _pad_right
+  const int,  // No need to stride across rows
+  const int output_col_stride
 )
 {
-  (void) output_row_stride;
-  (void) _pad_bottom;
-  constexpr int output_tile_cols = 4;
-  constexpr int inner_tile_cols = 8;
-
-  const int pad_right = Specialized ? PadRight : _pad_right;
-  const int cells_j = output_tile_cols - pad_right;
-
   // Construct a map to the output cells
-  float *outptrs[cells_j];
-  for (int j = 0; j < cells_j; j++)
+  float *outptrs[output_tile_cols];
+  for (int j = 0; j < output_tile_cols; j++)
   {
     outptrs[j] = output + j*output_col_stride;
   }
-  const float *inptr = matrix_base;
-  const float *bptr = biases;
 
   // For each channel of the output
   int channels_remaining = n_channels;
@@ -85,7 +72,7 @@
       b = vld1q_f32(bptr);
       bptr += 4;
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       vst1q_f32(outptrs[j], f[j] + b);
       outptrs[j] += 4;
@@ -114,7 +101,7 @@
       b = vld1_f32(bptr);
       bptr += 2;
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       vst1_f32(outptrs[j], f[j] + b);
       outptrs[j] += 2;
@@ -143,29 +130,14 @@
     {
       b = *(bptr++);
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       *(outptrs[j]++) = f[j] + b;
     }
   }
 }
 
-}  // namespace (anonymous)
+template class OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>;
+template class OutputTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>;
 
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 5, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_4_5_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_output_transform_4_5_fp32_process_tile<true, 1>,
-  winograd_output_transform_4_5_fp32_process_tile<true, 2>,
-  winograd_output_transform_4_5_fp32_process_tile<true, 3>
-};
-
-template class OutputTransform<1, 5, 1, 8, float>;
-template class OutputTransform<5, 1, 8, 1, float>;
 }  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..fd16a4d
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,1855 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "output.hpp"
+
+namespace winograd
+{
+
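+// Output transform for F(4x4, 3x3): each 6x6 Winograd-domain tile F is
+// mapped to a 4x4 spatial tile f = ZT F Z where
+//
+//   ZT = [ 1  1   1  1   1  0 ]
+//        [ 0  1  -1  2  -2  0 ]
+//        [ 0  1   1  4   4  0 ]
+//        [ 0  1  -1  8  -8  1 ]
+//
+// Only the factors 2 and 4 are materialised (in the two lanes of v0,
+// loaded from `coeffs`); the factor of 8 falls out of scaling an
+// already-doubled term by 4. The hand-scheduled aarch64 assembly below
+// provides two variants, one accumulating a bias vector and one without,
+// each processing channels in blocks of four, then two, then one.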
+#ifdef __aarch64__
+
+template <>
+void OutputTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>::transform_tile(
+  int n_channels,
+  const float* inptr,
+  const int matrix_stride,
+  const float* bptr,
+  float* output,
+  const int output_row_stride,
+  const int output_col_stride
+)
+{
+  const float coeffs[2] = {2.0f, 4.0f};  // Loaded into v0.s[0] and v0.s[1] below
+  if (bptr != nullptr)
+  {
+    __asm__ __volatile__ (
+      "ldr d0, [%[pcoeffs]]\n"
+      "add x21, %[in_col_stride1], %[in_col_stride1]\n"
+      "add x22, x21, %[in_col_stride1]\n"
+      "add x25, %[inptr0], %[in_row_stride]\n"
+      "add x15, %[output_col_stride1], %[output_col_stride1]\n"
+      "add x23, x22, %[in_col_stride1]\n"
+      "add x13, x25, %[in_row_stride]\n"
+      "add x16, x15, %[output_col_stride1]\n"
+      "add x24, x23, %[in_col_stride1]\n"
+      "add x26, x13, %[in_row_stride]\n"
+      "add x17, %[outptr0], %[output_row_stride]\n"
+      "add x14, x26, %[in_row_stride]\n"
+      "add x28, x17, %[output_row_stride]\n"
+      "lsr x19, %[n_channels], #2\n"
+      "add x27, x14, %[in_row_stride]\n"
+      "add x18, x28, %[output_row_stride]\n"
+      "and x20, %[n_channels], #3\n"
+      "cbz x19, 4f\n"
+      "1:\n"
+      "ldr q19, [%[inptr0]]\n"
+      "subs x19, x19, #1\n"
+      "ldr q20, [%[inptr0], %[in_col_stride1]]\n"
+      "ldr q4, [%[inptr0], x21]\n"
+      "fadd v1.4s, v20.4s, v4.4s\n"
+      "ldr q17, [%[inptr0], x22]\n"
+      "fsub v7.4s, v20.4s, v4.4s\n"
+      "ldr q22, [%[inptr0], x23]\n"
+      "fadd v5.4s, v17.4s, v22.4s\n"
+      "ldr q18, [%[inptr0], x24]\n"
+      "fsub v10.4s, v17.4s, v22.4s\n"
+      "ldr q25, [x25]\n"
+      "fadd v8.4s, v19.4s, v1.4s\n"
+      "ldr q12, [x25, %[in_col_stride1]]\n"
+      "mov v4.16b, v1.16b\n"
+      "ldr q23, [x25, x21]\n"
+      "mov v1.16b, v7.16b\n"
+      "ldr q9, [x25, x22]\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "ldr q11, [x25, x23]\n"
+      "fadd v8.4s, v8.4s, v5.4s\n"
+      "ldr q6, [x25, x24]\n"
+      "fmla v4.4s, v5.4s, v0.s[1]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "fmla v1.4s, v10.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v18.4s\n"
+      "beq 3f\n"
+      "2:\n"
+      "fadd v3.4s, v12.4s, v23.4s\n"
+      "ldr q2, [x13]\n"
+      "fadd v27.4s, v9.4s, v11.4s\n"
+      "ldr q21, [x13, %[in_col_stride1]]\n"
+      "fsub v16.4s, v12.4s, v23.4s\n"
+      "ldr q26, [x13, x21]\n"
+      "fsub v9.4s, v9.4s, v11.4s\n"
+      "ldr q17, [x13, x22]\n"
+      "fadd v14.4s, v25.4s, v3.4s\n"
+      "ldr q19, [x13, x23]\n"
+      "mov v11.16b, v3.16b\n"
+      "ldr q10, [x13, x24]\n"
+      "mov v3.16b, v16.16b\n"
+      "ldr q15, [x26]\n"
+      "fmul v9.4s, v9.4s, v0.s[0]\n"
+      "ldr q12, [x26, %[in_col_stride1]]\n"
+      "fadd v14.4s, v14.4s, v27.4s\n"
+      "ldr q20, [x26, x21]\n"
+      "fmla v11.4s, v27.4s, v0.s[1]\n"
+      "ldr q24, [x26, x22]\n"
+      "fadd v23.4s, v21.4s, v26.4s\n"
+      "ldr q29, [x26, x23]\n"
+      "fadd v13.4s, v16.4s, v9.4s\n"
+      "ldr q5, [x26, x24]\n"
+      "fmla v3.4s, v9.4s, v0.s[1]\n"
+      "ldr q18, [x14]\n"
+      "fadd v30.4s, v17.4s, v19.4s\n"
+      "add %[inptr0], %[inptr0], #16\n"
+      "fadd v16.4s, v2.4s, v23.4s\n"
+      "add x25, x25, #16\n"
+      "fsub v21.4s, v21.4s, v26.4s\n"
+      "ldr q22, [x14, %[in_col_stride1]]\n"
+      "fadd v3.4s, v3.4s, v6.4s\n"
+      "ldr q28, [x14, x21]\n"
+      "fsub v19.4s, v17.4s, v19.4s\n"
+      "add x13, x13, #16\n"
+      "fadd v16.4s, v16.4s, v30.4s\n"
+      "add x26, x26, #16\n"
+      "mov v17.16b, v23.16b\n"
+      "subs x19, x19, #1\n"
+      "fadd v26.4s, v12.4s, v20.4s\n"
+      "fsub v9.4s, v12.4s, v20.4s\n"
+      "fmul v19.4s, v19.4s, v0.s[0]\n"
+      "ldr q20, [x14, x22]\n"
+      "fmla v17.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v24.4s, v29.4s\n"
+      "fsub v12.4s, v24.4s, v29.4s\n"
+      "fadd v24.4s, v22.4s, v28.4s\n"
+      "fadd v23.4s, v15.4s, v26.4s\n"
+      "mov v15.16b, v26.16b\n"
+      "fsub v22.4s, v22.4s, v28.4s\n"
+      "fadd v29.4s, v14.4s, v16.4s\n"
+      "fsub v16.4s, v14.4s, v16.4s\n"
+      "ldr q28, [x14, x23]\n"
+      "fmul v12.4s, v12.4s, v0.s[0]\n"
+      "fmla v15.4s, v25.4s, v0.s[1]\n"
+      "fadd v23.4s, v23.4s, v25.4s\n"
+      "mov v6.16b, v21.16b\n"
+      "fadd v30.4s, v21.4s, v19.4s\n"
+      "fadd v26.4s, v18.4s, v24.4s\n"
+      "mov v25.16b, v24.16b\n"
+      "fadd v18.4s, v8.4s, v29.4s\n"
+      "fmla v6.4s, v19.4s, v0.s[1]\n"
+      "fadd v27.4s, v20.4s, v28.4s\n"
+      "fsub v21.4s, v20.4s, v28.4s\n"
+      "mov v19.16b, v29.16b\n"
+      "fadd v29.4s, v13.4s, v30.4s\n"
+      "fsub v8.4s, v13.4s, v30.4s\n"
+      "fadd v14.4s, v9.4s, v12.4s\n"
+      "fadd v6.4s, v6.4s, v10.4s\n"
+      "ldr q20, [x14, x24]\n"
+      "fadd v26.4s, v26.4s, v27.4s\n"
+      "add x14, x14, #16\n"
+      "fmla v9.4s, v12.4s, v0.s[1]\n"
+      "ldr q24, [x27]\n"
+      "fmul v21.4s, v21.4s, v0.s[0]\n"
+      "fmla v25.4s, v27.4s, v0.s[1]\n"
+      "fadd v10.4s, v7.4s, v29.4s\n"
+      "ldr q2, [%[bptr]]\n"
+      "mov v7.16b, v29.16b\n"
+      "add %[bptr], %[bptr], #16\n"
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "fadd v13.4s, v23.4s, v26.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "fadd v27.4s, v11.4s, v17.4s\n"
+      "fsub v11.4s, v11.4s, v17.4s\n"
+      "fadd v30.4s, v15.4s, v25.4s\n"
+      "fsub v15.4s, v15.4s, v25.4s\n"
+      "ldr q28, [x27, %[in_col_stride1]]\n"
+      "fadd v18.4s, v18.4s, v13.4s\n"
+      "fmla v19.4s, v13.4s, v0.s[1]\n"
+      "fadd v26.4s, v22.4s, v21.4s\n"
+      "mov v12.16b, v22.16b\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fadd v17.4s, v4.4s, v27.4s\n"
+      "fmul v15.4s, v15.4s, v0.s[0]\n"
+      "mov v4.16b, v27.16b\n"
+      "fmla v12.4s, v21.4s, v0.s[1]\n"
+      "ldr q22, [x27, x21]\n"
+      "fadd v18.4s, v18.4s, v2.4s\n"
+      "fadd v19.4s, v19.4s, v2.4s\n"
+      "fadd v17.4s, v17.4s, v30.4s\n"
+      "fmla v4.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v28.4s, v22.4s\n"
+      "fsub v27.4s, v28.4s, v22.4s\n"
+      "fadd v12.4s, v12.4s, v20.4s\n"
+      "ldr q29, [x27, x22]\n"
+      "str q18, [%[outptr0]]\n"
+      "fadd v22.4s, v16.4s, v23.4s\n"
+      "str q19, [x28]\n"
+      "fadd v28.4s, v24.4s, v25.4s\n"
+      "ldr q30, [x27, x23]\n"
+      "fadd v20.4s, v29.4s, v30.4s\n"
+      "fsub v18.4s, v29.4s, v30.4s\n"
+      "mov v21.16b, v25.16b\n"
+      "ldr q25, [x27, x24]\n"
+      "fmla v16.4s, v23.4s, v0.s[1]\n"
+      "ldr q19, [%[inptr0]]\n"
+      "fadd v17.4s, v17.4s, v2.4s\n"
+      "add x27, x27, #16\n"
+      "fadd v28.4s, v28.4s, v20.4s\n"
+      "fmul v18.4s, v18.4s, v0.s[0]\n"
+      "fmla v21.4s, v20.4s, v0.s[1]\n"
+      "ldr q20, [%[inptr0], %[in_col_stride1]]\n"
+      "fadd v22.4s, v22.4s, v2.4s\n"
+      "fadd v4.4s, v4.4s, v2.4s\n"
+      "str q17, [%[outptr0], x15]\n"
+      "mov v24.16b, v27.16b\n"
+      "fadd v23.4s, v27.4s, v18.4s\n"
+      "fadd v16.4s, v16.4s, v28.4s\n"
+      "fadd v13.4s, v14.4s, v26.4s\n"
+      "fsub v30.4s, v14.4s, v26.4s\n"
+      "str q22, [x17]\n"
+      "fmla v24.4s, v18.4s, v0.s[1]\n"
+      "str q4, [x28, x15]\n"
+      "mov v14.16b, v8.16b\n"
+      "fadd v29.4s, v11.4s, v15.4s\n"
+      "ldr q4, [%[inptr0], x21]\n"
+      "fadd v10.4s, v10.4s, v13.4s\n"
+      "ldr q17, [%[inptr0], x22]\n"
+      "fadd v24.4s, v24.4s, v25.4s\n"
+      "ldr q22, [%[inptr0], x23]\n"
+      "fmul v30.4s, v30.4s, v0.s[0]\n"
+      "fmla v7.4s, v13.4s, v0.s[1]\n"
+      "mov v26.16b, v11.16b\n"
+      "fadd v13.4s, v3.4s, v6.4s\n"
+      "fsub v3.4s, v3.4s, v6.4s\n"
+      "ldr q18, [%[inptr0], x24]\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "fadd v29.4s, v29.4s, v2.4s\n"
+      "fadd v8.4s, v8.4s, v30.4s\n"
+      "fmla v14.4s, v30.4s, v0.s[1]\n"
+      "fmla v26.4s, v15.4s, v0.s[1]\n"
+      "ldr q25, [x25]\n"
+      "fadd v27.4s, v9.4s, v12.4s\n"
+      "fadd v1.4s, v1.4s, v13.4s\n"
+      "str q10, [%[outptr0], %[output_col_stride1]]\n"
+      "fsub v6.4s, v9.4s, v12.4s\n"
+      "str q29, [x17, x15]\n"
+      "fadd v14.4s, v14.4s, v23.4s\n"
+      "fadd v26.4s, v26.4s, v21.4s\n"
+      "ldr q12, [x25, %[in_col_stride1]]\n"
+      "fadd v1.4s, v1.4s, v27.4s\n"
+      "ldr q23, [x25, x21]\n"
+      "fmul v6.4s, v6.4s, v0.s[0]\n"
+      "ldr q9, [x25, x22]\n"
+      "mov v5.16b, v13.16b\n"
+      "ldr q11, [x25, x23]\n"
+      "mov v13.16b, v3.16b\n"
+      "fadd v8.4s, v8.4s, v2.4s\n"
+      "fadd v1.4s, v1.4s, v2.4s\n"
+      "fadd v7.4s, v7.4s, v2.4s\n"
+      "fadd v10.4s, v3.4s, v6.4s\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "fmla v13.4s, v6.4s, v0.s[1]\n"
+      "ldr q6, [x25, x24]\n"
+      "str q8, [x17, %[output_col_stride1]]\n"
+      "fadd v16.4s, v16.4s, v2.4s\n"
+      "str q1, [%[outptr0], x16]\n"
+      "fadd v14.4s, v14.4s, v2.4s\n"
+      "str q7, [x28, %[output_col_stride1]]\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "fadd v13.4s, v13.4s, v24.4s\n"
+      "add %[outptr0], %[outptr0], #16\n"
+      "str q16, [x18]\n"
+      "fadd v5.4s, v5.4s, v2.4s\n"
+      "str q14, [x18, %[output_col_stride1]]\n"
+      "fadd v26.4s, v26.4s, v2.4s\n"
+      "str q10, [x17, x16]\n"
+      "fadd v1.4s, v20.4s, v4.4s\n"
+      "fadd v13.4s, v13.4s, v2.4s\n"
+      "add x17, x17, #16\n"
+      "str q5, [x28, x16]\n"
+      "fadd v5.4s, v17.4s, v22.4s\n"
+      "str q26, [x18, x15]\n"
+      "fsub v7.4s, v20.4s, v4.4s\n"
+      "fadd v8.4s, v19.4s, v1.4s\n"
+      "add x28, x28, #16\n"
+      "str q13, [x18, x16]\n"
+      "mov v4.16b, v1.16b\n"
+      "fsub v10.4s, v17.4s, v22.4s\n"
+      "add x18, x18, #16\n"
+      "mov v1.16b, v7.16b\n"
+      "fadd v8.4s, v8.4s, v5.4s\n"
+      "fmla v4.4s, v5.4s, v0.s[1]\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "fmla v1.4s, v10.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v18.4s\n"
+      "bne 2b\n"
+      "3:\n"
+      "fadd v3.4s, v12.4s, v23.4s\n"
+      "ldr q2, [x13]\n"
+      "fadd v27.4s, v9.4s, v11.4s\n"
+      "ldr q21, [x13, %[in_col_stride1]]\n"
+      "fsub v16.4s, v12.4s, v23.4s\n"
+      "ldr q26, [x13, x21]\n"
+      "fsub v9.4s, v9.4s, v11.4s\n"
+      "ldr q17, [x13, x22]\n"
+      "fadd v14.4s, v25.4s, v3.4s\n"
+      "ldr q19, [x13, x23]\n"
+      "mov v11.16b, v3.16b\n"
+      "ldr q10, [x13, x24]\n"
+      "mov v3.16b, v16.16b\n"
+      "ldr q15, [x26]\n"
+      "fmul v9.4s, v9.4s, v0.s[0]\n"
+      "ldr q12, [x26, %[in_col_stride1]]\n"
+      "fadd v14.4s, v14.4s, v27.4s\n"
+      "ldr q20, [x26, x21]\n"
+      "fmla v11.4s, v27.4s, v0.s[1]\n"
+      "ldr q24, [x26, x22]\n"
+      "fadd v23.4s, v21.4s, v26.4s\n"
+      "ldr q29, [x26, x23]\n"
+      "fadd v13.4s, v16.4s, v9.4s\n"
+      "ldr q5, [x26, x24]\n"
+      "fmla v3.4s, v9.4s, v0.s[1]\n"
+      "ldr q18, [x14]\n"
+      "fadd v30.4s, v17.4s, v19.4s\n"
+      "add %[inptr0], %[inptr0], #16\n"
+      "fadd v16.4s, v2.4s, v23.4s\n"
+      "add x25, x25, #16\n"
+      "fsub v21.4s, v21.4s, v26.4s\n"
+      "ldr q22, [x14, %[in_col_stride1]]\n"
+      "fadd v3.4s, v3.4s, v6.4s\n"
+      "ldr q28, [x14, x21]\n"
+      "fsub v19.4s, v17.4s, v19.4s\n"
+      "add x13, x13, #16\n"
+      "fadd v16.4s, v16.4s, v30.4s\n"
+      "add x26, x26, #16\n"
+      "mov v17.16b, v23.16b\n"
+      "fadd v26.4s, v12.4s, v20.4s\n"
+      "fsub v9.4s, v12.4s, v20.4s\n"
+      "ldr q2, [%[bptr]]\n"
+      "fmul v19.4s, v19.4s, v0.s[0]\n"
+      "add %[bptr], %[bptr], #16\n"
+      "fmla v17.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v24.4s, v29.4s\n"
+      "fadd v23.4s, v15.4s, v26.4s\n"
+      "fsub v12.4s, v24.4s, v29.4s\n"
+      "mov v15.16b, v26.16b\n"
+      "fadd v24.4s, v22.4s, v28.4s\n"
+      "fsub v22.4s, v22.4s, v28.4s\n"
+      "fadd v29.4s, v14.4s, v16.4s\n"
+      "fsub v16.4s, v14.4s, v16.4s\n"
+      "ldr q20, [x14, x22]\n"
+      "fadd v23.4s, v23.4s, v25.4s\n"
+      "fmul v12.4s, v12.4s, v0.s[0]\n"
+      "fmla v15.4s, v25.4s, v0.s[1]\n"
+      "mov v6.16b, v21.16b\n"
+      "fadd v30.4s, v21.4s, v19.4s\n"
+      "fadd v26.4s, v18.4s, v24.4s\n"
+      "mov v25.16b, v24.16b\n"
+      "fadd v18.4s, v8.4s, v29.4s\n"
+      "fmla v6.4s, v19.4s, v0.s[1]\n"
+      "mov v19.16b, v29.16b\n"
+      "fadd v27.4s, v11.4s, v17.4s\n"
+      "fsub v11.4s, v11.4s, v17.4s\n"
+      "fadd v29.4s, v13.4s, v30.4s\n"
+      "fsub v8.4s, v13.4s, v30.4s\n"
+      "fadd v14.4s, v9.4s, v12.4s\n"
+      "fadd v6.4s, v6.4s, v10.4s\n"
+      "ldr q28, [x14, x23]\n"
+      "fadd v17.4s, v4.4s, v27.4s\n"
+      "mov v4.16b, v27.16b\n"
+      "fmla v9.4s, v12.4s, v0.s[1]\n"
+      "fadd v27.4s, v20.4s, v28.4s\n"
+      "fsub v21.4s, v20.4s, v28.4s\n"
+      "fadd v10.4s, v7.4s, v29.4s\n"
+      "mov v7.16b, v29.16b\n"
+      "fadd v13.4s, v3.4s, v6.4s\n"
+      "fsub v3.4s, v3.4s, v6.4s\n"
+      "ldr q20, [x14, x24]\n"
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "fadd v26.4s, v26.4s, v27.4s\n"
+      "fmul v21.4s, v21.4s, v0.s[0]\n"
+      "add x14, x14, #16\n"
+      "fmla v25.4s, v27.4s, v0.s[1]\n"
+      "mov v12.16b, v22.16b\n"
+      "fadd v1.4s, v1.4s, v13.4s\n"
+      "mov v5.16b, v13.16b\n"
+      "fadd v13.4s, v23.4s, v26.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "fadd v26.4s, v22.4s, v21.4s\n"
+      "ldr q24, [x27]\n"
+      "fmla v12.4s, v21.4s, v0.s[1]\n"
+      "fadd v30.4s, v15.4s, v25.4s\n"
+      "fsub v15.4s, v15.4s, v25.4s\n"
+      "ldr q28, [x27, %[in_col_stride1]]\n"
+      "fadd v18.4s, v18.4s, v13.4s\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fmla v19.4s, v13.4s, v0.s[1]\n"
+      "ldr q22, [x27, x21]\n"
+      "fadd v12.4s, v12.4s, v20.4s\n"
+      "ldr q29, [x27, x22]\n"
+      "fadd v17.4s, v17.4s, v30.4s\n"
+      "fmul v15.4s, v15.4s, v0.s[0]\n"
+      "fmla v4.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v28.4s, v22.4s\n"
+      "fsub v27.4s, v28.4s, v22.4s\n"
+      "fadd v22.4s, v16.4s, v23.4s\n"
+      "fadd v18.4s, v18.4s, v2.4s\n"
+      "fadd v17.4s, v17.4s, v2.4s\n"
+      "fadd v19.4s, v19.4s, v2.4s\n"
+      "fadd v28.4s, v24.4s, v25.4s\n"
+      "mov v21.16b, v25.16b\n"
+      "fmla v16.4s, v23.4s, v0.s[1]\n"
+      "ldr q30, [x27, x23]\n"
+      "str q18, [%[outptr0]]\n"
+      "fadd v20.4s, v29.4s, v30.4s\n"
+      "str q17, [%[outptr0], x15]\n"
+      "fsub v18.4s, v29.4s, v30.4s\n"
+      "str q19, [x28]\n"
+      "mov v24.16b, v27.16b\n"
+      "fadd v13.4s, v14.4s, v26.4s\n"
+      "ldr q25, [x27, x24]\n"
+      "fadd v28.4s, v28.4s, v20.4s\n"
+      "add x27, x27, #16\n"
+      "fmul v18.4s, v18.4s, v0.s[0]\n"
+      "fmla v21.4s, v20.4s, v0.s[1]\n"
+      "fsub v30.4s, v14.4s, v26.4s\n"
+      "mov v14.16b, v8.16b\n"
+      "fadd v10.4s, v10.4s, v13.4s\n"
+      "fmla v7.4s, v13.4s, v0.s[1]\n"
+      "fadd v16.4s, v16.4s, v28.4s\n"
+      "fadd v29.4s, v11.4s, v15.4s\n"
+      "fadd v23.4s, v27.4s, v18.4s\n"
+      "fmla v24.4s, v18.4s, v0.s[1]\n"
+      "fmul v30.4s, v30.4s, v0.s[0]\n"
+      "mov v26.16b, v11.16b\n"
+      "fadd v27.4s, v9.4s, v12.4s\n"
+      "fsub v6.4s, v9.4s, v12.4s\n"
+      "mov v13.16b, v3.16b\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "fadd v24.4s, v24.4s, v25.4s\n"
+      "fmla v26.4s, v15.4s, v0.s[1]\n"
+      "fadd v8.4s, v8.4s, v30.4s\n"
+      "fmla v14.4s, v30.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v27.4s\n"
+      "fmul v6.4s, v6.4s, v0.s[0]\n"
+      "str q10, [%[outptr0], %[output_col_stride1]]\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "fadd v26.4s, v26.4s, v21.4s\n"
+      "fadd v22.4s, v22.4s, v2.4s\n"
+      "fadd v14.4s, v14.4s, v23.4s\n"
+      "fadd v8.4s, v8.4s, v2.4s\n"
+      "fadd v10.4s, v3.4s, v6.4s\n"
+      "fmla v13.4s, v6.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v2.4s\n"
+      "fadd v29.4s, v29.4s, v2.4s\n"
+      "str q22, [x17]\n"
+      "fadd v7.4s, v7.4s, v2.4s\n"
+      "str q8, [x17, %[output_col_stride1]]\n"
+      "fadd v4.4s, v4.4s, v2.4s\n"
+      "fadd v13.4s, v13.4s, v24.4s\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "str q1, [%[outptr0], x16]\n"
+      "fadd v5.4s, v5.4s, v2.4s\n"
+      "str q29, [x17, x15]\n"
+      "fadd v16.4s, v16.4s, v2.4s\n"
+      "str q7, [x28, %[output_col_stride1]]\n"
+      "fadd v14.4s, v14.4s, v2.4s\n"
+      "str q10, [x17, x16]\n"
+      "fadd v26.4s, v26.4s, v2.4s\n"
+      "str q4, [x28, x15]\n"
+      "fadd v13.4s, v13.4s, v2.4s\n"
+      "str q5, [x28, x16]\n"
+      "add %[outptr0], %[outptr0], #16\n"
+      "str q16, [x18]\n"
+      "add x17, x17, #16\n"
+      "str q14, [x18, %[output_col_stride1]]\n"
+      "add x28, x28, #16\n"
+      "str q26, [x18, x15]\n"
+      "str q13, [x18, x16]\n"
+      "add x18, x18, #16\n"
+      "4:\n"
+      "cmp x20, #2\n"
+      "blt 5f\n"
+      "ldr d19, [%[inptr0]]\n"
+      "ldr d20, [%[inptr0], %[in_col_stride1]]\n"
+      "sub x20, x20, #2\n"
+      "ldr d4, [%[inptr0], x21]\n"
+      "ldr d17, [%[inptr0], x22]\n"
+      "fadd v1.4s, v20.4s, v4.4s\n"
+      "ldr d22, [%[inptr0], x23]\n"
+      "fadd v5.4s, v17.4s, v22.4s\n"
+      "ldr d18, [%[inptr0], x24]\n"
+      "fsub v7.4s, v20.4s, v4.4s\n"
+      "ldr d25, [x25]\n"
+      "fsub v10.4s, v17.4s, v22.4s\n"
+      "ldr d12, [x25, %[in_col_stride1]]\n"
+      "fadd v8.4s, v19.4s, v1.4s\n"
+      "ldr d23, [x25, x21]\n"
+      "mov v4.16b, v1.16b\n"
+      "ldr d9, [x25, x22]\n"
+      "mov v1.16b, v7.16b\n"
+      "ldr d11, [x25, x23]\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "ldr d6, [x25, x24]\n"
+      "fadd v8.4s, v8.4s, v5.4s\n"
+      "ldr d2, [x13]\n"
+      "fmla v4.4s, v5.4s, v0.s[1]\n"
+      "ldr d21, [x13, %[in_col_stride1]]\n"
+      "fadd v3.4s, v12.4s, v23.4s\n"
+      "ldr d26, [x13, x21]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "ldr d17, [x13, x22]\n"
+      "fmla v1.4s, v10.4s, v0.s[1]\n"
+      "ldr d19, [x13, x23]\n"
+      "fadd v27.4s, v9.4s, v11.4s\n"
+      "ldr d10, [x13, x24]\n"
+      "fadd v14.4s, v25.4s, v3.4s\n"
+      "ldr d15, [x26]\n"
+      "fsub v16.4s, v12.4s, v23.4s\n"
+      "ldr d12, [x26, %[in_col_stride1]]\n"
+      "fadd v1.4s, v1.4s, v18.4s\n"
+      "ldr d20, [x26, x21]\n"
+      "fsub v9.4s, v9.4s, v11.4s\n"
+      "ldr d24, [x26, x22]\n"
+      "fadd v14.4s, v14.4s, v27.4s\n"
+      "ldr d29, [x26, x23]\n"
+      "mov v11.16b, v3.16b\n"
+      "ldr d5, [x26, x24]\n"
+      "mov v3.16b, v16.16b\n"
+      "ldr d18, [x14]\n"
+      "fmul v9.4s, v9.4s, v0.s[0]\n"
+      "add %[inptr0], %[inptr0], #8\n"
+      "fmla v11.4s, v27.4s, v0.s[1]\n"
+      "add x25, x25, #8\n"
+      "fadd v23.4s, v21.4s, v26.4s\n"
+      "add x13, x13, #8\n"
+      "fsub v21.4s, v21.4s, v26.4s\n"
+      "ldr d22, [x14, %[in_col_stride1]]\n"
+      "fadd v13.4s, v16.4s, v9.4s\n"
+      "add x26, x26, #8\n"
+      "fmla v3.4s, v9.4s, v0.s[1]\n"
+      "fadd v30.4s, v17.4s, v19.4s\n"
+      "fadd v16.4s, v2.4s, v23.4s\n"
+      "fsub v19.4s, v17.4s, v19.4s\n"
+      "mov v17.16b, v23.16b\n"
+      "fadd v26.4s, v12.4s, v20.4s\n"
+      "fsub v9.4s, v12.4s, v20.4s\n"
+      "ldr d28, [x14, x21]\n"
+      "fadd v3.4s, v3.4s, v6.4s\n"
+      "ldr d20, [x14, x22]\n"
+      "fadd v16.4s, v16.4s, v30.4s\n"
+      "fmul v19.4s, v19.4s, v0.s[0]\n"
+      "fmla v17.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v24.4s, v29.4s\n"
+      "fadd v23.4s, v15.4s, v26.4s\n"
+      "fsub v12.4s, v24.4s, v29.4s\n"
+      "mov v15.16b, v26.16b\n"
+      "fadd v24.4s, v22.4s, v28.4s\n"
+      "fsub v22.4s, v22.4s, v28.4s\n"
+      "fadd v29.4s, v14.4s, v16.4s\n"
+      "fsub v16.4s, v14.4s, v16.4s\n"
+      "ldr d28, [x14, x23]\n"
+      "fadd v23.4s, v23.4s, v25.4s\n"
+      "fmul v12.4s, v12.4s, v0.s[0]\n"
+      "fmla v15.4s, v25.4s, v0.s[1]\n"
+      "mov v6.16b, v21.16b\n"
+      "fadd v30.4s, v21.4s, v19.4s\n"
+      "fadd v26.4s, v18.4s, v24.4s\n"
+      "mov v25.16b, v24.16b\n"
+      "fadd v18.4s, v8.4s, v29.4s\n"
+      "fmla v6.4s, v19.4s, v0.s[1]\n"
+      "fadd v27.4s, v20.4s, v28.4s\n"
+      "fsub v21.4s, v20.4s, v28.4s\n"
+      "mov v19.16b, v29.16b\n"
+      "fadd v29.4s, v13.4s, v30.4s\n"
+      "fsub v8.4s, v13.4s, v30.4s\n"
+      "fadd v14.4s, v9.4s, v12.4s\n"
+      "fadd v6.4s, v6.4s, v10.4s\n"
+      "ldr d20, [x14, x24]\n"
+      "fadd v26.4s, v26.4s, v27.4s\n"
+      "add x14, x14, #8\n"
+      "fmla v9.4s, v12.4s, v0.s[1]\n"
+      "ldr d24, [x27]\n"
+      "fmul v21.4s, v21.4s, v0.s[0]\n"
+      "fmla v25.4s, v27.4s, v0.s[1]\n"
+      "fadd v10.4s, v7.4s, v29.4s\n"
+      "ldr d2, [%[bptr]]\n"
+      "mov v7.16b, v29.16b\n"
+      "add %[bptr], %[bptr], #8\n"
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "fadd v13.4s, v23.4s, v26.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "fadd v27.4s, v11.4s, v17.4s\n"
+      "fsub v11.4s, v11.4s, v17.4s\n"
+      "fadd v30.4s, v15.4s, v25.4s\n"
+      "fsub v15.4s, v15.4s, v25.4s\n"
+      "ldr d28, [x27, %[in_col_stride1]]\n"
+      "fadd v18.4s, v18.4s, v13.4s\n"
+      "fmla v19.4s, v13.4s, v0.s[1]\n"
+      "fadd v26.4s, v22.4s, v21.4s\n"
+      "mov v12.16b, v22.16b\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fadd v17.4s, v4.4s, v27.4s\n"
+      "fmul v15.4s, v15.4s, v0.s[0]\n"
+      "mov v4.16b, v27.16b\n"
+      "fmla v12.4s, v21.4s, v0.s[1]\n"
+      "ldr d22, [x27, x21]\n"
+      "fadd v18.4s, v18.4s, v2.4s\n"
+      "fadd v19.4s, v19.4s, v2.4s\n"
+      "fadd v17.4s, v17.4s, v30.4s\n"
+      "fmla v4.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v28.4s, v22.4s\n"
+      "fsub v27.4s, v28.4s, v22.4s\n"
+      "fadd v12.4s, v12.4s, v20.4s\n"
+      "ldr d29, [x27, x22]\n"
+      "str d18, [%[outptr0]]\n"
+      "fadd v22.4s, v16.4s, v23.4s\n"
+      "str d19, [x28]\n"
+      "fadd v28.4s, v24.4s, v25.4s\n"
+      "ldr d30, [x27, x23]\n"
+      "fadd v20.4s, v29.4s, v30.4s\n"
+      "fsub v18.4s, v29.4s, v30.4s\n"
+      "mov v21.16b, v25.16b\n"
+      "ldr d25, [x27, x24]\n"
+      "fmla v16.4s, v23.4s, v0.s[1]\n"
+      "add x27, x27, #8\n"
+      "mov v24.16b, v27.16b\n"
+      "fadd v17.4s, v17.4s, v2.4s\n"
+      "fadd v28.4s, v28.4s, v20.4s\n"
+      "fmul v18.4s, v18.4s, v0.s[0]\n"
+      "fmla v21.4s, v20.4s, v0.s[1]\n"
+      "fadd v13.4s, v14.4s, v26.4s\n"
+      "fsub v30.4s, v14.4s, v26.4s\n"
+      "mov v14.16b, v8.16b\n"
+      "str d17, [%[outptr0], x15]\n"
+      "fadd v29.4s, v11.4s, v15.4s\n"
+      "fadd v23.4s, v27.4s, v18.4s\n"
+      "fmla v24.4s, v18.4s, v0.s[1]\n"
+      "fadd v16.4s, v16.4s, v28.4s\n"
+      "fadd v10.4s, v10.4s, v13.4s\n"
+      "fmul v30.4s, v30.4s, v0.s[0]\n"
+      "fmla v7.4s, v13.4s, v0.s[1]\n"
+      "mov v26.16b, v11.16b\n"
+      "fadd v13.4s, v3.4s, v6.4s\n"
+      "fadd v24.4s, v24.4s, v25.4s\n"
+      "fadd v27.4s, v9.4s, v12.4s\n"
+      "fsub v3.4s, v3.4s, v6.4s\n"
+      "fsub v6.4s, v9.4s, v12.4s\n"
+      "fadd v8.4s, v8.4s, v30.4s\n"
+      "fmla v14.4s, v30.4s, v0.s[1]\n"
+      "fmla v26.4s, v15.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v13.4s\n"
+      "mov v5.16b, v13.16b\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "fmul v6.4s, v6.4s, v0.s[0]\n"
+      "mov v13.16b, v3.16b\n"
+      "fadd v14.4s, v14.4s, v23.4s\n"
+      "fadd v22.4s, v22.4s, v2.4s\n"
+      "fadd v26.4s, v26.4s, v21.4s\n"
+      "fadd v1.4s, v1.4s, v27.4s\n"
+      "str d10, [%[outptr0], %[output_col_stride1]]\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "fadd v10.4s, v3.4s, v6.4s\n"
+      "fmla v13.4s, v6.4s, v0.s[1]\n"
+      "str d22, [x17]\n"
+      "fadd v8.4s, v8.4s, v2.4s\n"
+      "fadd v1.4s, v1.4s, v2.4s\n"
+      "fadd v29.4s, v29.4s, v2.4s\n"
+      "fadd v7.4s, v7.4s, v2.4s\n"
+      "fadd v4.4s, v4.4s, v2.4s\n"
+      "fadd v13.4s, v13.4s, v24.4s\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "str d8, [x17, %[output_col_stride1]]\n"
+      "fadd v5.4s, v5.4s, v2.4s\n"
+      "str d1, [%[outptr0], x16]\n"
+      "fadd v16.4s, v16.4s, v2.4s\n"
+      "str d29, [x17, x15]\n"
+      "fadd v14.4s, v14.4s, v2.4s\n"
+      "str d10, [x17, x16]\n"
+      "fadd v26.4s, v26.4s, v2.4s\n"
+      "str d7, [x28, %[output_col_stride1]]\n"
+      "fadd v13.4s, v13.4s, v2.4s\n"
+      "str d4, [x28, x15]\n"
+      "add %[outptr0], %[outptr0], #8\n"
+      "str d5, [x28, x16]\n"
+      "add x17, x17, #8\n"
+      "str d16, [x18]\n"
+      "add x28, x28, #8\n"
+      "str d14, [x18, %[output_col_stride1]]\n"
+      "str d26, [x18, x15]\n"
+      "str d13, [x18, x16]\n"
+      "add x18, x18, #8\n"
+      "5:\n"
+      "cbz x20, 6f\n"
+      "ldr s19, [%[inptr0]]\n"
+      "ldr s20, [%[inptr0], %[in_col_stride1]]\n"
+      "ldr s4, [%[inptr0], x21]\n"
+      "fadd v1.4s, v20.4s, v4.4s\n"
+      "ldr s17, [%[inptr0], x22]\n"
+      "fsub v7.4s, v20.4s, v4.4s\n"
+      "ldr s22, [%[inptr0], x23]\n"
+      "fadd v5.4s, v17.4s, v22.4s\n"
+      "ldr s18, [%[inptr0], x24]\n"
+      "fsub v10.4s, v17.4s, v22.4s\n"
+      "ldr s25, [x25]\n"
+      "fadd v8.4s, v19.4s, v1.4s\n"
+      "ldr s12, [x25, %[in_col_stride1]]\n"
+      "mov v4.16b, v1.16b\n"
+      "ldr s23, [x25, x21]\n"
+      "mov v1.16b, v7.16b\n"
+      "ldr s9, [x25, x22]\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "ldr s11, [x25, x23]\n"
+      "fadd v8.4s, v8.4s, v5.4s\n"
+      "ldr s6, [x25, x24]\n"
+      "fmla v4.4s, v5.4s, v0.s[1]\n"
+      "ldr s2, [x13]\n"
+      "fadd v3.4s, v12.4s, v23.4s\n"
+      "ldr s21, [x13, %[in_col_stride1]]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "ldr s26, [x13, x21]\n"
+      "fmla v1.4s, v10.4s, v0.s[1]\n"
+      "ldr s17, [x13, x22]\n"
+      "fadd v27.4s, v9.4s, v11.4s\n"
+      "ldr s19, [x13, x23]\n"
+      "fadd v14.4s, v25.4s, v3.4s\n"
+      "ldr s10, [x13, x24]\n"
+      "fsub v16.4s, v12.4s, v23.4s\n"
+      "ldr s15, [x26]\n"
+      "fadd v1.4s, v1.4s, v18.4s\n"
+      "ldr s12, [x26, %[in_col_stride1]]\n"
+      "fsub v9.4s, v9.4s, v11.4s\n"
+      "ldr s20, [x26, x21]\n"
+      "fadd v14.4s, v14.4s, v27.4s\n"
+      "ldr s24, [x26, x22]\n"
+      "mov v11.16b, v3.16b\n"
+      "ldr s29, [x26, x23]\n"
+      "mov v3.16b, v16.16b\n"
+      "ldr s5, [x26, x24]\n"
+      "fmul v9.4s, v9.4s, v0.s[0]\n"
+      "ldr s18, [x14]\n"
+      "fmla v11.4s, v27.4s, v0.s[1]\n"
+      "fadd v23.4s, v21.4s, v26.4s\n"
+      "fsub v21.4s, v21.4s, v26.4s\n"
+      "fadd v30.4s, v17.4s, v19.4s\n"
+      "fsub v19.4s, v17.4s, v19.4s\n"
+      "ldr s22, [x14, %[in_col_stride1]]\n"
+      "fadd v13.4s, v16.4s, v9.4s\n"
+      "fmla v3.4s, v9.4s, v0.s[1]\n"
+      "fadd v16.4s, v2.4s, v23.4s\n"
+      "mov v17.16b, v23.16b\n"
+      "fadd v26.4s, v12.4s, v20.4s\n"
+      "fsub v9.4s, v12.4s, v20.4s\n"
+      "fmul v19.4s, v19.4s, v0.s[0]\n"
+      "ldr s28, [x14, x21]\n"
+      "fadd v3.4s, v3.4s, v6.4s\n"
+      "ldr s20, [x14, x22]\n"
+      "fadd v16.4s, v16.4s, v30.4s\n"
+      "fmla v17.4s, v30.4s, v0.s[1]\n"
+      "fadd v25.4s, v24.4s, v29.4s\n"
+      "fadd v23.4s, v15.4s, v26.4s\n"
+      "fsub v12.4s, v24.4s, v29.4s\n"
+      "mov v15.16b, v26.16b\n"
+      "fadd v24.4s, v22.4s, v28.4s\n"
+      "fsub v22.4s, v22.4s, v28.4s\n"
+      "fadd v30.4s, v21.4s, v19.4s\n"
+      "mov v6.16b, v21.16b\n"
+      "fadd v23.4s, v23.4s, v25.4s\n"
+      "fmla v15.4s, v25.4s, v0.s[1]\n"
+      "fmul v12.4s, v12.4s, v0.s[0]\n"
+      "ldr s28, [x14, x23]\n"
+      "fmla v6.4s, v19.4s, v0.s[1]\n"
+      "fadd v27.4s, v20.4s, v28.4s\n"
+      "fadd v26.4s, v18.4s, v24.4s\n"
+      "fsub v21.4s, v20.4s, v28.4s\n"
+      "mov v25.16b, v24.16b\n"
+      "fadd v29.4s, v14.4s, v16.4s\n"
+      "fsub v16.4s, v14.4s, v16.4s\n"
+      "ldr s20, [x14, x24]\n"
+      "fadd v6.4s, v6.4s, v10.4s\n"
+      "ldr s24, [x27]\n"
+      "fadd v26.4s, v26.4s, v27.4s\n"
+      "fmul v21.4s, v21.4s, v0.s[0]\n"
+      "fmla v25.4s, v27.4s, v0.s[1]\n"
+      "fadd v18.4s, v8.4s, v29.4s\n"
+      "mov v19.16b, v29.16b\n"
+      "fadd v29.4s, v13.4s, v30.4s\n"
+      "fsub v8.4s, v13.4s, v30.4s\n"
+      "fadd v27.4s, v11.4s, v17.4s\n"
+      "fsub v11.4s, v11.4s, v17.4s\n"
+      "fadd v13.4s, v23.4s, v26.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "ldr s28, [x27, %[in_col_stride1]]\n"
+      "fadd v10.4s, v7.4s, v29.4s\n"
+      "mov v7.16b, v29.16b\n"
+      "fadd v17.4s, v4.4s, v27.4s\n"
+      "mov v4.16b, v27.16b\n"
+      "fadd v18.4s, v18.4s, v13.4s\n"
+      "fmla v19.4s, v13.4s, v0.s[1]\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fadd v30.4s, v15.4s, v25.4s\n"
+      "fsub v15.4s, v15.4s, v25.4s\n"
+      "fadd v13.4s, v3.4s, v6.4s\n"
+      "fsub v3.4s, v3.4s, v6.4s\n"
+      "ldr s2, [%[bptr]]\n"
+      "fadd v18.4s, v18.4s, v2.4s\n"
+      "fadd v19.4s, v19.4s, v2.4s\n"
+      "fadd v17.4s, v17.4s, v30.4s\n"
+      "fmla v4.4s, v30.4s, v0.s[1]\n"
+      "fadd v14.4s, v9.4s, v12.4s\n"
+      "fmul v15.4s, v15.4s, v0.s[0]\n"
+      "fadd v1.4s, v1.4s, v13.4s\n"
+      "str s18, [%[outptr0]]\n"
+      "fadd v26.4s, v22.4s, v21.4s\n"
+      "str s19, [x28]\n"
+      "fmla v9.4s, v12.4s, v0.s[1]\n"
+      "mov v12.16b, v22.16b\n"
+      "ldr s22, [x27, x21]\n"
+      "fadd v25.4s, v28.4s, v22.4s\n"
+      "fsub v27.4s, v28.4s, v22.4s\n"
+      "fadd v22.4s, v16.4s, v23.4s\n"
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "ldr s29, [x27, x22]\n"
+      "fmla v12.4s, v21.4s, v0.s[1]\n"
+      "ldr s30, [x27, x23]\n"
+      "fadd v28.4s, v24.4s, v25.4s\n"
+      "mov v21.16b, v25.16b\n"
+      "fmla v16.4s, v23.4s, v0.s[1]\n"
+      "ldr s25, [x27, x24]\n"
+      "mov v5.16b, v13.16b\n"
+      "fadd v17.4s, v17.4s, v2.4s\n"
+      "fadd v12.4s, v12.4s, v20.4s\n"
+      "fadd v20.4s, v29.4s, v30.4s\n"
+      "fsub v18.4s, v29.4s, v30.4s\n"
+      "mov v24.16b, v27.16b\n"
+      "fadd v22.4s, v22.4s, v2.4s\n"
+      "fadd v4.4s, v4.4s, v2.4s\n"
+      "str s17, [%[outptr0], x15]\n"
+      "fadd v13.4s, v14.4s, v26.4s\n"
+      "fadd v28.4s, v28.4s, v20.4s\n"
+      "fmla v21.4s, v20.4s, v0.s[1]\n"
+      "fmul v18.4s, v18.4s, v0.s[0]\n"
+      "fsub v30.4s, v14.4s, v26.4s\n"
+      "str s22, [x17]\n"
+      "mov v14.16b, v8.16b\n"
+      "str s4, [x28, x15]\n"
+      "fadd v10.4s, v10.4s, v13.4s\n"
+      "fadd v16.4s, v16.4s, v28.4s\n"
+      "fmla v7.4s, v13.4s, v0.s[1]\n"
+      "fadd v23.4s, v27.4s, v18.4s\n"
+      "fmla v24.4s, v18.4s, v0.s[1]\n"
+      "fmul v30.4s, v30.4s, v0.s[0]\n"
+      "fadd v29.4s, v11.4s, v15.4s\n"
+      "mov v26.16b, v11.16b\n"
+      "fadd v27.4s, v9.4s, v12.4s\n"
+      "fsub v6.4s, v9.4s, v12.4s\n"
+      "mov v13.16b, v3.16b\n"
+      "fadd v24.4s, v24.4s, v25.4s\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "fadd v8.4s, v8.4s, v30.4s\n"
+      "fmla v14.4s, v30.4s, v0.s[1]\n"
+      "fmla v26.4s, v15.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v27.4s\n"
+      "fmul v6.4s, v6.4s, v0.s[0]\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "str s10, [%[outptr0], %[output_col_stride1]]\n"
+      "fadd v29.4s, v29.4s, v2.4s\n"
+      "fadd v14.4s, v14.4s, v23.4s\n"
+      "fadd v8.4s, v8.4s, v2.4s\n"
+      "fadd v26.4s, v26.4s, v21.4s\n"
+      "fadd v1.4s, v1.4s, v2.4s\n"
+      "fadd v10.4s, v3.4s, v6.4s\n"
+      "fmla v13.4s, v6.4s, v0.s[1]\n"
+      "str s29, [x17, x15]\n"
+      "fadd v7.4s, v7.4s, v2.4s\n"
+      "str s8, [x17, %[output_col_stride1]]\n"
+      "fadd v5.4s, v5.4s, v2.4s\n"
+      "str s1, [%[outptr0], x16]\n"
+      "fadd v16.4s, v16.4s, v2.4s\n"
+      "fadd v13.4s, v13.4s, v24.4s\n"
+      "fadd v10.4s, v10.4s, v2.4s\n"
+      "str s7, [x28, %[output_col_stride1]]\n"
+      "fadd v14.4s, v14.4s, v2.4s\n"
+      "str s5, [x28, x16]\n"
+      "fadd v26.4s, v26.4s, v2.4s\n"
+      "str s16, [x18]\n"
+      "fadd v13.4s, v13.4s, v2.4s\n"
+      "str s10, [x17, x16]\n"
+      "str s14, [x18, %[output_col_stride1]]\n"
+      "str s26, [x18, x15]\n"
+      "str s13, [x18, x16]\n"
+      "6:\n"
+      : [bptr] "+r" (bptr), [outptr0] "+r" (output), [inptr0] "+r" (inptr)
+      : [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [pcoeffs] "r" (coeffs), [n_channels] "r" ((long) n_channels), [in_row_stride] "r" (6 * matrix_stride * sizeof(float)), [in_col_stride1] "r" (matrix_stride * sizeof(float))
+      : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+    );
+  }
+  else
+  {
+    __asm__ __volatile__ (
+      "ldr d0, [%[pcoeffs]]\n"
+      "add x21, %[in_col_stride1], %[in_col_stride1]\n"  // Compute input column stride 2
+      "add x22, x21, %[in_col_stride1]\n"  // Compute input column stride 3
+      "add x25, %[inptr0], %[in_row_stride]\n"  // Compute input row pointers
+      "add x15, %[output_col_stride1], %[output_col_stride1]\n"  // Compute output column stride 2
+      "add x23, x22, %[in_col_stride1]\n"  // Compute input column stride 4
+      "add x13, x25, %[in_row_stride]\n"  // Compute input row pointers
+      "add x16, x15, %[output_col_stride1]\n"  // Compute output column stride 3
+      "add x24, x23, %[in_col_stride1]\n"  // Compute input column stride 5
+      "add x26, x13, %[in_row_stride]\n"  // Compute input row pointers
+      "add x17, %[outptr0], %[output_row_stride]\n"  // Compute output row pointer 1
+      "add x14, x26, %[in_row_stride]\n"  // Compute input row pointers
+      "add x28, x17, %[output_row_stride]\n"  // Compute output row pointer 2
+      "lsr x19, %[n_channels], #2\n"
+      "add x27, x14, %[in_row_stride]\n"  // Compute input row pointers
+      "add x18, x28, %[output_row_stride]\n"  // Compute output row pointer 3
+      "and x20, %[n_channels], #3\n"
+      "cbz x19, 4f\n"
+      "1:\n"  // Quad head
+      "ldr q17, [%[inptr0]]\n"
+      "subs x19, x19, #1\n"
+      "ldr q23, [%[inptr0], %[in_col_stride1]]\n"
+      "ldr q27, [%[inptr0], x21]\n"
+      "fadd v4.4s, v23.4s, v27.4s\n"
+      "ldr q24, [%[inptr0], x22]\n"
+      "fsub v13.4s, v23.4s, v27.4s\n"
+      "ldr q11, [%[inptr0], x23]\n"
+      "fadd v10.4s, v24.4s, v11.4s\n"
+      "ldr q12, [%[inptr0], x24]\n"
+      "fsub v11.4s, v24.4s, v11.4s\n"
+      "ldr q20, [x25]\n"
+      "fadd v7.4s, v17.4s, v4.4s\n"
+      "ldr q19, [x25, %[in_col_stride1]]\n"
+      "mov v4.16b, v4.16b\n"
+      "ldr q22, [x25, x21]\n"
+      "mov v1.16b, v13.16b\n"
+      "ldr q14, [x25, x22]\n"
+      "fmul v11.4s, v11.4s, v0.s[0]\n"
+      "ldr q18, [x25, x23]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "ldr q3, [x25, x24]\n"
+      "fmla v4.4s, v10.4s, v0.s[1]\n"
+      "fadd v8.4s, v13.4s, v11.4s\n"
+      "fmla v1.4s, v11.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v12.4s\n"
+      "beq 3f\n"
+      "2:\n"  // Quad loop
+      "fadd v2.4s, v19.4s, v22.4s\n"
+      "ldr q16, [x13]\n"
+      "fadd v23.4s, v14.4s, v18.4s\n"
+      "ldr q21, [x13, %[in_col_stride1]]\n"
+      "fsub v15.4s, v19.4s, v22.4s\n"
+      "ldr q24, [x13, x21]\n"
+      "fsub v31.4s, v14.4s, v18.4s\n"
+      "ldr q25, [x13, x22]\n"
+      "fadd v11.4s, v20.4s, v2.4s\n"
+      "ldr q17, [x13, x23]\n"
+      "mov v13.16b, v2.16b\n"
+      "ldr q9, [x13, x24]\n"
+      "mov v2.16b, v15.16b\n"
+      "ldr q6, [x26]\n"
+      "fmul v31.4s, v31.4s, v0.s[0]\n"
+      "ldr q19, [x26, %[in_col_stride1]]\n"
+      "fadd v11.4s, v11.4s, v23.4s\n"
+      "ldr q22, [x26, x21]\n"
+      "fmla v13.4s, v23.4s, v0.s[1]\n"
+      "ldr q12, [x26, x22]\n"
+      "fadd v29.4s, v21.4s, v24.4s\n"
+      "ldr q26, [x26, x23]\n"
+      "fadd v15.4s, v15.4s, v31.4s\n"
+      "ldr q5, [x26, x24]\n"
+      "fmla v2.4s, v31.4s, v0.s[1]\n"
+      "ldr q10, [x14]\n"
+      "fadd v18.4s, v25.4s, v17.4s\n"
+      "add %[inptr0], %[inptr0], #16\n"
+      "fadd v27.4s, v16.4s, v29.4s\n"
+      "add x25, x25, #16\n"
+      "fsub v14.4s, v21.4s, v24.4s\n"
+      "ldr q30, [x14, %[in_col_stride1]]\n"
+      "fadd v2.4s, v2.4s, v3.4s\n"
+      "ldr q31, [x14, x21]\n"
+      "fsub v28.4s, v25.4s, v17.4s\n"
+      "add x13, x13, #16\n"
+      "fadd v27.4s, v27.4s, v18.4s\n"
+      "add x26, x26, #16\n"
+      "mov v21.16b, v29.16b\n"
+      "subs x19, x19, #1\n"
+      "fadd v20.4s, v19.4s, v22.4s\n"
+      "fsub v17.4s, v19.4s, v22.4s\n"
+      "fmul v28.4s, v28.4s, v0.s[0]\n"
+      "ldr q23, [x14, x22]\n"
+      "fmla v21.4s, v18.4s, v0.s[1]\n"
+      "fadd v29.4s, v12.4s, v26.4s\n"
+      "fsub v16.4s, v12.4s, v26.4s\n"
+      "fadd v25.4s, v30.4s, v31.4s\n"
+      "fadd v24.4s, v6.4s, v20.4s\n"
+      "mov v6.16b, v20.16b\n"
+      "fsub v22.4s, v30.4s, v31.4s\n"
+      "fadd v31.4s, v11.4s, v27.4s\n"
+      "fsub v12.4s, v11.4s, v27.4s\n"
+      "ldr q26, [x14, x23]\n"
+      "fmul v16.4s, v16.4s, v0.s[0]\n"
+      "fmla v6.4s, v29.4s, v0.s[1]\n"
+      "fadd v24.4s, v24.4s, v29.4s\n"
+      "mov v3.16b, v14.16b\n"
+      "fadd v20.4s, v14.4s, v28.4s\n"
+      "fadd v29.4s, v10.4s, v25.4s\n"
+      "mov v10.16b, v25.16b\n"
+      "fadd v25.4s, v7.4s, v31.4s\n"
+      "fmla v3.4s, v28.4s, v0.s[1]\n"
+      "fadd v14.4s, v23.4s, v26.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "mov v26.16b, v31.16b\n"
+      "fadd v31.4s, v15.4s, v20.4s\n"
+      "fsub v11.4s, v15.4s, v20.4s\n"
+      "fadd v20.4s, v17.4s, v16.4s\n"
+      "mov v7.16b, v17.16b\n"
+      "fadd v3.4s, v3.4s, v9.4s\n"
+      "ldr q18, [x14, x24]\n"
+      "fadd v29.4s, v29.4s, v14.4s\n"
+      "add x14, x14, #16\n"
+      "fmla v7.4s, v16.4s, v0.s[1]\n"
+      "ldr q19, [x27]\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fmla v10.4s, v14.4s, v0.s[1]\n"
+      "fadd v15.4s, v8.4s, v31.4s\n"
+      "mov v14.16b, v31.16b\n"
+      "fadd v28.4s, v24.4s, v29.4s\n"
+      "fsub v24.4s, v24.4s, v29.4s\n"
+      "fadd v7.4s, v7.4s, v5.4s\n"
+      "ldr q27, [x27, %[in_col_stride1]]\n"
+      "fadd v30.4s, v13.4s, v21.4s\n"
+      "fsub v9.4s, v13.4s, v21.4s\n"
+      "fadd v17.4s, v22.4s, v23.4s\n"
+      "mov v8.16b, v22.16b\n"
+      "fadd v25.4s, v25.4s, v28.4s\n"
+      "fmul v24.4s, v24.4s, v0.s[0]\n"
+      "fmla v26.4s, v28.4s, v0.s[1]\n"
+      "ldr q29, [x27, x21]\n"
+      "fmla v8.4s, v23.4s, v0.s[1]\n"
+      "ldr q28, [x27, x22]\n"
+      "fadd v13.4s, v4.4s, v30.4s\n"
+      "mov v4.16b, v30.16b\n"
+      "str q25, [%[outptr0]]\n"  // Store output (0, 0)
+      "fadd v16.4s, v27.4s, v29.4s\n"
+      "str q26, [x28]\n"  // Store output (2, 0)
+      "fsub v29.4s, v27.4s, v29.4s\n"
+      "fadd v8.4s, v8.4s, v18.4s\n"
+      "ldr q23, [x27, x23]\n"
+      "fadd v30.4s, v28.4s, v23.4s\n"
+      "ldr q25, [x27, x24]\n"
+      "fadd v19.4s, v19.4s, v16.4s\n"
+      "add x27, x27, #16\n"
+      "fsub v27.4s, v28.4s, v23.4s\n"
+      "mov v16.16b, v16.16b\n"
+      "fadd v22.4s, v20.4s, v17.4s\n"
+      "fsub v20.4s, v20.4s, v17.4s\n"
+      "fadd v21.4s, v12.4s, v24.4s\n"
+      "mov v26.16b, v12.16b\n"
+      "fadd v19.4s, v19.4s, v30.4s\n"
+      "fmla v16.4s, v30.4s, v0.s[1]\n"
+      "fmul v27.4s, v27.4s, v0.s[0]\n"
+      "ldr q17, [%[inptr0]]\n"
+      "fmla v26.4s, v24.4s, v0.s[1]\n"
+      "ldr q23, [%[inptr0], %[in_col_stride1]]\n"
+      "str q21, [x17]\n"  // Store output (1, 0)
+      "mov v5.16b, v29.16b\n"
+      "fadd v15.4s, v15.4s, v22.4s\n"
+      "fmul v20.4s, v20.4s, v0.s[0]\n"
+      "fadd v18.4s, v29.4s, v27.4s\n"
+      "fmla v14.4s, v22.4s, v0.s[1]\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "ldr q27, [%[inptr0], x21]\n"
+      "fadd v26.4s, v26.4s, v19.4s\n"
+      "ldr q24, [%[inptr0], x22]\n"
+      "str q15, [%[outptr0], %[output_col_stride1]]\n"  // Store output (0, 1)
+      "fadd v12.4s, v11.4s, v20.4s\n"
+      "str q14, [x28, %[output_col_stride1]]\n"  // Store output (2, 1)
+      "mov v28.16b, v11.16b\n"
+      "fadd v5.4s, v5.4s, v25.4s\n"
+      "ldr q11, [%[inptr0], x23]\n"
+      "str q26, [x18]\n"  // Store output (3, 0)
+      "fadd v21.4s, v6.4s, v10.4s\n"
+      "str q12, [x17, %[output_col_stride1]]\n"  // Store output (1, 1)
+      "fmla v28.4s, v20.4s, v0.s[1]\n"
+      "fsub v10.4s, v6.4s, v10.4s\n"
+      "ldr q12, [%[inptr0], x24]\n"
+      "mov v15.16b, v9.16b\n"
+      "ldr q20, [x25]\n"
+      "fadd v13.4s, v13.4s, v21.4s\n"
+      "ldr q19, [x25, %[in_col_stride1]]\n"
+      "fadd v28.4s, v28.4s, v18.4s\n"
+      "ldr q22, [x25, x21]\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "ldr q14, [x25, x22]\n"
+      "fmla v4.4s, v21.4s, v0.s[1]\n"
+      "ldr q18, [x25, x23]\n"
+      "str q13, [%[outptr0], x15]\n"  // Store output (0, 2)
+      "fadd v6.4s, v2.4s, v3.4s\n"
+      "str q28, [x18, %[output_col_stride1]]\n"  // Store output (3, 1)
+      "fadd v30.4s, v7.4s, v8.4s\n"
+      "fadd v13.4s, v9.4s, v10.4s\n"
+      "fmla v15.4s, v10.4s, v0.s[1]\n"
+      "str q4, [x28, x15]\n"  // Store output (2, 2)
+      "fsub v2.4s, v2.4s, v3.4s\n"
+      "fadd v1.4s, v1.4s, v6.4s\n"
+      "ldr q3, [x25, x24]\n"
+      "fsub v8.4s, v7.4s, v8.4s\n"
+      "mov v6.16b, v6.16b\n"
+      "str q13, [x17, x15]\n"  // Store output (1, 2)
+      "fadd v15.4s, v15.4s, v16.4s\n"
+      "mov v9.16b, v2.16b\n"
+      "fadd v4.4s, v23.4s, v27.4s\n"
+      "fadd v1.4s, v1.4s, v30.4s\n"
+      "fmla v6.4s, v30.4s, v0.s[1]\n"
+      "fmul v8.4s, v8.4s, v0.s[0]\n"
+      "fadd v10.4s, v24.4s, v11.4s\n"
+      "str q15, [x18, x15]\n"  // Store output (3, 2)
+      "fsub v13.4s, v23.4s, v27.4s\n"
+      "fadd v7.4s, v17.4s, v4.4s\n"
+      "fsub v11.4s, v24.4s, v11.4s\n"
+      "str q1, [%[outptr0], x16]\n"  // Store output (0, 3)
+      "mov v4.16b, v4.16b\n"
+      "str q6, [x28, x16]\n"  // Store output (2, 3)
+      "fadd v2.4s, v2.4s, v8.4s\n"
+      "fmla v9.4s, v8.4s, v0.s[1]\n"
+      "add %[outptr0], %[outptr0], #16\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "add x28, x28, #16\n"
+      "fmul v11.4s, v11.4s, v0.s[0]\n"
+      "fmla v4.4s, v10.4s, v0.s[1]\n"
+      "str q2, [x17, x16]\n"  // Store output (1, 3)
+      "mov v1.16b, v13.16b\n"
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "add x17, x17, #16\n"
+      "fadd v8.4s, v13.4s, v11.4s\n"
+      "fmla v1.4s, v11.4s, v0.s[1]\n"
+      "str q9, [x18, x16]\n"  // Store output (3, 3)
+      "add x18, x18, #16\n"
+      "fadd v1.4s, v1.4s, v12.4s\n"
+      "bne 2b\n"
+      "3:\n"  // Quad tail
+      "fadd v2.4s, v19.4s, v22.4s\n"
+      "ldr q16, [x13]\n"
+      "fadd v23.4s, v14.4s, v18.4s\n"
+      "ldr q21, [x13, %[in_col_stride1]]\n"
+      "fsub v15.4s, v19.4s, v22.4s\n"
+      "ldr q24, [x13, x21]\n"
+      "fsub v31.4s, v14.4s, v18.4s\n"
+      "ldr q25, [x13, x22]\n"
+      "fadd v11.4s, v20.4s, v2.4s\n"
+      "ldr q17, [x13, x23]\n"
+      "mov v13.16b, v2.16b\n"
+      "ldr q9, [x13, x24]\n"
+      "mov v2.16b, v15.16b\n"
+      "ldr q6, [x26]\n"
+      "fmul v31.4s, v31.4s, v0.s[0]\n"
+      "ldr q19, [x26, %[in_col_stride1]]\n"
+      "fadd v11.4s, v11.4s, v23.4s\n"
+      "ldr q22, [x26, x21]\n"
+      "fmla v13.4s, v23.4s, v0.s[1]\n"
+      "ldr q12, [x26, x22]\n"
+      "fadd v29.4s, v21.4s, v24.4s\n"
+      "ldr q26, [x26, x23]\n"
+      "fadd v15.4s, v15.4s, v31.4s\n"
+      "ldr q5, [x26, x24]\n"
+      "fmla v2.4s, v31.4s, v0.s[1]\n"
+      "ldr q10, [x14]\n"
+      "fadd v18.4s, v25.4s, v17.4s\n"
+      "add %[inptr0], %[inptr0], #16\n"
+      "fadd v27.4s, v16.4s, v29.4s\n"
+      "add x25, x25, #16\n"
+      "fsub v14.4s, v21.4s, v24.4s\n"
+      "ldr q30, [x14, %[in_col_stride1]]\n"
+      "fadd v2.4s, v2.4s, v3.4s\n"
+      "ldr q31, [x14, x21]\n"
+      "fsub v28.4s, v25.4s, v17.4s\n"
+      "add x13, x13, #16\n"
+      "fadd v27.4s, v27.4s, v18.4s\n"
+      "add x26, x26, #16\n"
+      "mov v21.16b, v29.16b\n"
+      "fadd v20.4s, v19.4s, v22.4s\n"
+      "fsub v17.4s, v19.4s, v22.4s\n"
+      "fadd v29.4s, v12.4s, v26.4s\n"
+      "fmul v28.4s, v28.4s, v0.s[0]\n"
+      "fsub v16.4s, v12.4s, v26.4s\n"
+      "fmla v21.4s, v18.4s, v0.s[1]\n"
+      "ldr q23, [x14, x22]\n"
+      "fadd v24.4s, v6.4s, v20.4s\n"
+      "mov v6.16b, v20.16b\n"
+      "fadd v25.4s, v30.4s, v31.4s\n"
+      "fsub v22.4s, v30.4s, v31.4s\n"
+      "fadd v20.4s, v14.4s, v28.4s\n"
+      "mov v3.16b, v14.16b\n"
+      "fmul v16.4s, v16.4s, v0.s[0]\n"
+      "fmla v6.4s, v29.4s, v0.s[1]\n"
+      "fadd v24.4s, v24.4s, v29.4s\n"
+      "ldr q26, [x14, x23]\n"
+      "fmla v3.4s, v28.4s, v0.s[1]\n"
+      "fadd v14.4s, v23.4s, v26.4s\n"
+      "fadd v29.4s, v10.4s, v25.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "mov v10.16b, v25.16b\n"
+      "fadd v31.4s, v11.4s, v27.4s\n"
+      "fsub v12.4s, v11.4s, v27.4s\n"
+      "ldr q18, [x14, x24]\n"
+      "fadd v3.4s, v3.4s, v9.4s\n"
+      "ldr q19, [x27]\n"
+      "fadd v29.4s, v29.4s, v14.4s\n"
+      "add x14, x14, #16\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fmla v10.4s, v14.4s, v0.s[1]\n"
+      "fadd v25.4s, v7.4s, v31.4s\n"
+      "mov v26.16b, v31.16b\n"
+      "fadd v31.4s, v15.4s, v20.4s\n"
+      "fsub v11.4s, v15.4s, v20.4s\n"
+      "fadd v28.4s, v24.4s, v29.4s\n"
+      "fsub v24.4s, v24.4s, v29.4s\n"
+      "fadd v30.4s, v13.4s, v21.4s\n"
+      "fsub v9.4s, v13.4s, v21.4s\n"
+      "fadd v20.4s, v17.4s, v16.4s\n"
+      "mov v7.16b, v17.16b\n"
+      "fadd v15.4s, v8.4s, v31.4s\n"
+      "mov v14.16b, v31.16b\n"
+      "fadd v25.4s, v25.4s, v28.4s\n"
+      "fmul v24.4s, v24.4s, v0.s[0]\n"
+      "fmla v7.4s, v16.4s, v0.s[1]\n"
+      "ldr q27, [x27, %[in_col_stride1]]\n"
+      "fmla v26.4s, v28.4s, v0.s[1]\n"
+      "ldr q29, [x27, x21]\n"
+      "fadd v13.4s, v4.4s, v30.4s\n"
+      "mov v4.16b, v30.16b\n"
+      "str q25, [%[outptr0]]\n"  // Store output (0, 0)
+      "fadd v17.4s, v22.4s, v23.4s\n"
+      "fadd v7.4s, v7.4s, v5.4s\n"
+      "ldr q28, [x27, x22]\n"
+      "str q26, [x28]\n"  // Store output (2, 0)
+      "mov v8.16b, v22.16b\n"
+      "fadd v16.4s, v27.4s, v29.4s\n"
+      "fsub v29.4s, v27.4s, v29.4s\n"
+      "fadd v21.4s, v12.4s, v24.4s\n"
+      "mov v26.16b, v12.16b\n"
+      "fmla v8.4s, v23.4s, v0.s[1]\n"
+      "fadd v22.4s, v20.4s, v17.4s\n"
+      "fsub v20.4s, v20.4s, v17.4s\n"
+      "ldr q23, [x27, x23]\n"
+      "fadd v19.4s, v19.4s, v16.4s\n"
+      "mov v16.16b, v16.16b\n"
+      "str q21, [x17]\n"  // Store output (1, 0)
+      "fadd v30.4s, v28.4s, v23.4s\n"
+      "fadd v8.4s, v8.4s, v18.4s\n"
+      "ldr q25, [x27, x24]\n"
+      "fsub v27.4s, v28.4s, v23.4s\n"
+      "add x27, x27, #16\n"
+      "mov v5.16b, v29.16b\n"
+      "fmla v26.4s, v24.4s, v0.s[1]\n"
+      "fadd v19.4s, v19.4s, v30.4s\n"
+      "fmla v16.4s, v30.4s, v0.s[1]\n"
+      "fadd v15.4s, v15.4s, v22.4s\n"
+      "fmul v20.4s, v20.4s, v0.s[0]\n"
+      "fmul v27.4s, v27.4s, v0.s[0]\n"
+      "fmla v14.4s, v22.4s, v0.s[1]\n"
+      "mov v28.16b, v11.16b\n"
+      "fadd v21.4s, v6.4s, v10.4s\n"
+      "fadd v26.4s, v26.4s, v19.4s\n"
+      "fsub v10.4s, v6.4s, v10.4s\n"
+      "str q15, [%[outptr0], %[output_col_stride1]]\n"  // Store output (0, 1)
+      "fadd v12.4s, v11.4s, v20.4s\n"
+      "str q14, [x28, %[output_col_stride1]]\n"  // Store output (2, 1)
+      "fadd v18.4s, v29.4s, v27.4s\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "fmla v28.4s, v20.4s, v0.s[1]\n"
+      "str q26, [x18]\n"  // Store output (3, 0)
+      "fadd v13.4s, v13.4s, v21.4s\n"
+      "str q12, [x17, %[output_col_stride1]]\n"  // Store output (1, 1)
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "fmla v4.4s, v21.4s, v0.s[1]\n"
+      "mov v15.16b, v9.16b\n"
+      "fadd v5.4s, v5.4s, v25.4s\n"
+      "fadd v28.4s, v28.4s, v18.4s\n"
+      "str q13, [%[outptr0], x15]\n"  // Store output (0, 2)
+      "fadd v6.4s, v2.4s, v3.4s\n"
+      "fadd v13.4s, v9.4s, v10.4s\n"
+      "fmla v15.4s, v10.4s, v0.s[1]\n"
+      "str q4, [x28, x15]\n"  // Store output (2, 2)
+      "fadd v30.4s, v7.4s, v8.4s\n"
+      "str q28, [x18, %[output_col_stride1]]\n"  // Store output (3, 1)
+      "fsub v2.4s, v2.4s, v3.4s\n"
+      "fadd v1.4s, v1.4s, v6.4s\n"
+      "fsub v8.4s, v7.4s, v8.4s\n"
+      "str q13, [x17, x15]\n"  // Store output (1, 2)
+      "fadd v15.4s, v15.4s, v16.4s\n"
+      "mov v6.16b, v6.16b\n"
+      "mov v9.16b, v2.16b\n"
+      "fadd v1.4s, v1.4s, v30.4s\n"
+      "fmul v8.4s, v8.4s, v0.s[0]\n"
+      "str q15, [x18, x15]\n"  // Store output (3, 2)
+      "fmla v6.4s, v30.4s, v0.s[1]\n"
+      "str q1, [%[outptr0], x16]\n"  // Store output (0, 3)
+      "fadd v2.4s, v2.4s, v8.4s\n"
+      "str q6, [x28, x16]\n"  // Store output (2, 3)
+      "fmla v9.4s, v8.4s, v0.s[1]\n"
+      "add %[outptr0], %[outptr0], #16\n"
+      "add x28, x28, #16\n"
+      "str q2, [x17, x16]\n"  // Store output (1, 3)
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "add x17, x17, #16\n"
+      "str q9, [x18, x16]\n"  // Store output (3, 3)
+      "add x18, x18, #16\n"
+      "4:\n"  // Double
+      "cmp x20, #2\n"
+      "blt 5f\n"
+      "ldr d17, [%[inptr0]]\n"
+      "ldr d23, [%[inptr0], %[in_col_stride1]]\n"
+      "sub x20, x20, #2\n"
+      "ldr d27, [%[inptr0], x21]\n"
+      "ldr d24, [%[inptr0], x22]\n"
+      "fadd v4.4s, v23.4s, v27.4s\n"
+      "ldr d11, [%[inptr0], x23]\n"
+      "fadd v10.4s, v24.4s, v11.4s\n"
+      "ldr d12, [%[inptr0], x24]\n"
+      "fsub v13.4s, v23.4s, v27.4s\n"
+      "ldr d20, [x25]\n"
+      "fsub v11.4s, v24.4s, v11.4s\n"
+      "ldr d19, [x25, %[in_col_stride1]]\n"
+      "fadd v7.4s, v17.4s, v4.4s\n"
+      "ldr d22, [x25, x21]\n"
+      "mov v4.16b, v4.16b\n"
+      "ldr d14, [x25, x22]\n"
+      "mov v1.16b, v13.16b\n"
+      "ldr d18, [x25, x23]\n"
+      "fmul v11.4s, v11.4s, v0.s[0]\n"
+      "ldr d3, [x25, x24]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "ldr d16, [x13]\n"
+      "fmla v4.4s, v10.4s, v0.s[1]\n"
+      "ldr d21, [x13, %[in_col_stride1]]\n"
+      "fadd v2.4s, v19.4s, v22.4s\n"
+      "ldr d24, [x13, x21]\n"
+      "fadd v8.4s, v13.4s, v11.4s\n"
+      "ldr d25, [x13, x22]\n"
+      "fmla v1.4s, v11.4s, v0.s[1]\n"
+      "ldr d17, [x13, x23]\n"
+      "fadd v23.4s, v14.4s, v18.4s\n"
+      "ldr d9, [x13, x24]\n"
+      "fadd v11.4s, v20.4s, v2.4s\n"
+      "ldr d6, [x26]\n"
+      "fsub v15.4s, v19.4s, v22.4s\n"
+      "ldr d19, [x26, %[in_col_stride1]]\n"
+      "fadd v1.4s, v1.4s, v12.4s\n"
+      "ldr d22, [x26, x21]\n"
+      "fsub v31.4s, v14.4s, v18.4s\n"
+      "ldr d12, [x26, x22]\n"
+      "fadd v11.4s, v11.4s, v23.4s\n"
+      "ldr d26, [x26, x23]\n"
+      "mov v13.16b, v2.16b\n"
+      "ldr d5, [x26, x24]\n"
+      "mov v2.16b, v15.16b\n"
+      "ldr d10, [x14]\n"
+      "fmul v31.4s, v31.4s, v0.s[0]\n"
+      "add %[inptr0], %[inptr0], #8\n"
+      "fmla v13.4s, v23.4s, v0.s[1]\n"
+      "add x25, x25, #8\n"
+      "fadd v29.4s, v21.4s, v24.4s\n"
+      "add x13, x13, #8\n"
+      "fsub v14.4s, v21.4s, v24.4s\n"
+      "ldr d30, [x14, %[in_col_stride1]]\n"
+      "fadd v15.4s, v15.4s, v31.4s\n"
+      "add x26, x26, #8\n"
+      "fmla v2.4s, v31.4s, v0.s[1]\n"
+      "fadd v18.4s, v25.4s, v17.4s\n"
+      "fadd v27.4s, v16.4s, v29.4s\n"
+      "fsub v28.4s, v25.4s, v17.4s\n"
+      "mov v21.16b, v29.16b\n"
+      "fadd v20.4s, v19.4s, v22.4s\n"
+      "fsub v17.4s, v19.4s, v22.4s\n"
+      "ldr d31, [x14, x21]\n"
+      "fadd v2.4s, v2.4s, v3.4s\n"
+      "ldr d23, [x14, x22]\n"
+      "fadd v27.4s, v27.4s, v18.4s\n"
+      "fmul v28.4s, v28.4s, v0.s[0]\n"
+      "fmla v21.4s, v18.4s, v0.s[1]\n"
+      "fadd v29.4s, v12.4s, v26.4s\n"
+      "fadd v24.4s, v6.4s, v20.4s\n"
+      "fsub v16.4s, v12.4s, v26.4s\n"
+      "mov v6.16b, v20.16b\n"
+      "fadd v25.4s, v30.4s, v31.4s\n"
+      "fsub v22.4s, v30.4s, v31.4s\n"
+      "fadd v31.4s, v11.4s, v27.4s\n"
+      "fsub v12.4s, v11.4s, v27.4s\n"
+      "ldr d26, [x14, x23]\n"
+      "fadd v24.4s, v24.4s, v29.4s\n"
+      "fmul v16.4s, v16.4s, v0.s[0]\n"
+      "fmla v6.4s, v29.4s, v0.s[1]\n"
+      "mov v3.16b, v14.16b\n"
+      "fadd v20.4s, v14.4s, v28.4s\n"
+      "fadd v29.4s, v10.4s, v25.4s\n"
+      "mov v10.16b, v25.16b\n"
+      "fadd v25.4s, v7.4s, v31.4s\n"
+      "fmla v3.4s, v28.4s, v0.s[1]\n"
+      "fadd v14.4s, v23.4s, v26.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "mov v26.16b, v31.16b\n"
+      "fadd v31.4s, v15.4s, v20.4s\n"
+      "fsub v11.4s, v15.4s, v20.4s\n"
+      "fadd v20.4s, v17.4s, v16.4s\n"
+      "mov v7.16b, v17.16b\n"
+      "fadd v3.4s, v3.4s, v9.4s\n"
+      "ldr d18, [x14, x24]\n"
+      "fadd v29.4s, v29.4s, v14.4s\n"
+      "add x14, x14, #8\n"
+      "fmla v7.4s, v16.4s, v0.s[1]\n"
+      "ldr d19, [x27]\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fmla v10.4s, v14.4s, v0.s[1]\n"
+      "fadd v15.4s, v8.4s, v31.4s\n"
+      "mov v14.16b, v31.16b\n"
+      "fadd v28.4s, v24.4s, v29.4s\n"
+      "fsub v24.4s, v24.4s, v29.4s\n"
+      "fadd v7.4s, v7.4s, v5.4s\n"
+      "ldr d27, [x27, %[in_col_stride1]]\n"
+      "fadd v30.4s, v13.4s, v21.4s\n"
+      "fsub v9.4s, v13.4s, v21.4s\n"
+      "fadd v17.4s, v22.4s, v23.4s\n"
+      "mov v8.16b, v22.16b\n"
+      "fadd v25.4s, v25.4s, v28.4s\n"
+      "fmul v24.4s, v24.4s, v0.s[0]\n"
+      "fmla v26.4s, v28.4s, v0.s[1]\n"
+      "ldr d29, [x27, x21]\n"
+      "fmla v8.4s, v23.4s, v0.s[1]\n"
+      "ldr d28, [x27, x22]\n"
+      "fadd v13.4s, v4.4s, v30.4s\n"
+      "mov v4.16b, v30.16b\n"
+      "str d25, [%[outptr0]]\n"  // Store output (0, 0)
+      "fadd v16.4s, v27.4s, v29.4s\n"
+      "str d26, [x28]\n"  // Store output (2, 0)
+      "fsub v29.4s, v27.4s, v29.4s\n"
+      "fadd v8.4s, v8.4s, v18.4s\n"
+      "ldr d23, [x27, x23]\n"
+      "fadd v30.4s, v28.4s, v23.4s\n"
+      "ldr d25, [x27, x24]\n"
+      "fadd v19.4s, v19.4s, v16.4s\n"
+      "add x27, x27, #8\n"
+      "fsub v27.4s, v28.4s, v23.4s\n"
+      "mov v16.16b, v16.16b\n"
+      "fadd v22.4s, v20.4s, v17.4s\n"
+      "fsub v20.4s, v20.4s, v17.4s\n"
+      "fadd v21.4s, v12.4s, v24.4s\n"
+      "mov v26.16b, v12.16b\n"
+      "fadd v19.4s, v19.4s, v30.4s\n"
+      "fmla v16.4s, v30.4s, v0.s[1]\n"
+      "fmul v27.4s, v27.4s, v0.s[0]\n"
+      "mov v5.16b, v29.16b\n"
+      "fmla v26.4s, v24.4s, v0.s[1]\n"
+      "fadd v15.4s, v15.4s, v22.4s\n"
+      "str d21, [x17]\n"  // Store output (1, 0)
+      "fmul v20.4s, v20.4s, v0.s[0]\n"
+      "fmla v14.4s, v22.4s, v0.s[1]\n"
+      "mov v28.16b, v11.16b\n"
+      "fadd v18.4s, v29.4s, v27.4s\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "str d15, [%[outptr0], %[output_col_stride1]]\n"  // Store output (0, 1)
+      "fadd v26.4s, v26.4s, v19.4s\n"
+      "fadd v12.4s, v11.4s, v20.4s\n"
+      "fmla v28.4s, v20.4s, v0.s[1]\n"
+      "str d14, [x28, %[output_col_stride1]]\n"  // Store output (2, 1)
+      "fadd v21.4s, v6.4s, v10.4s\n"
+      "fadd v5.4s, v5.4s, v25.4s\n"
+      "fsub v10.4s, v6.4s, v10.4s\n"
+      "str d26, [x18]\n"  // Store output (3, 0)
+      "mov v15.16b, v9.16b\n"
+      "str d12, [x17, %[output_col_stride1]]\n"  // Store output (1, 1)
+      "fadd v28.4s, v28.4s, v18.4s\n"
+      "fadd v13.4s, v13.4s, v21.4s\n"
+      "fmla v4.4s, v21.4s, v0.s[1]\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "fadd v6.4s, v2.4s, v3.4s\n"
+      "fadd v30.4s, v7.4s, v8.4s\n"
+      "fsub v2.4s, v2.4s, v3.4s\n"
+      "str d28, [x18, %[output_col_stride1]]\n"  // Store output (3, 1)
+      "fsub v8.4s, v7.4s, v8.4s\n"
+      "str d13, [%[outptr0], x15]\n"  // Store output (0, 2)
+      "str d4, [x28, x15]\n"  // Store output (2, 2)
+      "fadd v13.4s, v9.4s, v10.4s\n"
+      "fmla v15.4s, v10.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v6.4s\n"
+      "mov v6.16b, v6.16b\n"
+      "fmul v8.4s, v8.4s, v0.s[0]\n"
+      "mov v9.16b, v2.16b\n"
+      "str d13, [x17, x15]\n"  // Store output (1, 2)
+      "fadd v15.4s, v15.4s, v16.4s\n"
+      "fadd v1.4s, v1.4s, v30.4s\n"
+      "fmla v6.4s, v30.4s, v0.s[1]\n"
+      "fadd v2.4s, v2.4s, v8.4s\n"
+      "fmla v9.4s, v8.4s, v0.s[1]\n"
+      "str d15, [x18, x15]\n"  // Store output (3, 2)
+      "str d1, [%[outptr0], x16]\n"  // Store output (0, 3)
+      "str d2, [x17, x16]\n"  // Store output (1, 3)
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "str d6, [x28, x16]\n"  // Store output (2, 3)
+      "add %[outptr0], %[outptr0], #8\n"
+      "add x17, x17, #8\n"
+      "add x28, x28, #8\n"
+      "str d9, [x18, x16]\n"  // Store output (3, 3)
+      "add x18, x18, #8\n"
+      "5:\n"  // Scalar
+      "cbz x20, 6f\n"
+      "ldr s17, [%[inptr0]]\n"
+      "ldr s23, [%[inptr0], %[in_col_stride1]]\n"
+      "ldr s27, [%[inptr0], x21]\n"
+      "fadd v4.4s, v23.4s, v27.4s\n"
+      "ldr s24, [%[inptr0], x22]\n"
+      "fsub v13.4s, v23.4s, v27.4s\n"
+      "ldr s11, [%[inptr0], x23]\n"
+      "fadd v10.4s, v24.4s, v11.4s\n"
+      "ldr s12, [%[inptr0], x24]\n"
+      "fsub v11.4s, v24.4s, v11.4s\n"
+      "ldr s20, [x25]\n"
+      "fadd v7.4s, v17.4s, v4.4s\n"
+      "ldr s19, [x25, %[in_col_stride1]]\n"
+      "mov v4.16b, v4.16b\n"
+      "ldr s22, [x25, x21]\n"
+      "mov v1.16b, v13.16b\n"
+      "ldr s14, [x25, x22]\n"
+      "fmul v11.4s, v11.4s, v0.s[0]\n"
+      "ldr s18, [x25, x23]\n"
+      "fadd v7.4s, v7.4s, v10.4s\n"
+      "ldr s3, [x25, x24]\n"
+      "fmla v4.4s, v10.4s, v0.s[1]\n"
+      "ldr s16, [x13]\n"
+      "fadd v2.4s, v19.4s, v22.4s\n"
+      "ldr s21, [x13, %[in_col_stride1]]\n"
+      "fadd v8.4s, v13.4s, v11.4s\n"
+      "ldr s24, [x13, x21]\n"
+      "fmla v1.4s, v11.4s, v0.s[1]\n"
+      "ldr s25, [x13, x22]\n"
+      "fadd v23.4s, v14.4s, v18.4s\n"
+      "ldr s17, [x13, x23]\n"
+      "fadd v11.4s, v20.4s, v2.4s\n"
+      "ldr s9, [x13, x24]\n"
+      "fsub v15.4s, v19.4s, v22.4s\n"
+      "ldr s6, [x26]\n"
+      "fadd v1.4s, v1.4s, v12.4s\n"
+      "ldr s19, [x26, %[in_col_stride1]]\n"
+      "fsub v31.4s, v14.4s, v18.4s\n"
+      "ldr s22, [x26, x21]\n"
+      "fadd v11.4s, v11.4s, v23.4s\n"
+      "ldr s12, [x26, x22]\n"
+      "mov v13.16b, v2.16b\n"
+      "ldr s26, [x26, x23]\n"
+      "mov v2.16b, v15.16b\n"
+      "ldr s5, [x26, x24]\n"
+      "fmul v31.4s, v31.4s, v0.s[0]\n"
+      "ldr s10, [x14]\n"
+      "fmla v13.4s, v23.4s, v0.s[1]\n"
+      "fadd v29.4s, v21.4s, v24.4s\n"
+      "fsub v14.4s, v21.4s, v24.4s\n"
+      "fadd v18.4s, v25.4s, v17.4s\n"
+      "fsub v28.4s, v25.4s, v17.4s\n"
+      "ldr s30, [x14, %[in_col_stride1]]\n"
+      "fadd v15.4s, v15.4s, v31.4s\n"
+      "fmla v2.4s, v31.4s, v0.s[1]\n"
+      "fadd v27.4s, v16.4s, v29.4s\n"
+      "mov v21.16b, v29.16b\n"
+      "fadd v20.4s, v19.4s, v22.4s\n"
+      "fsub v17.4s, v19.4s, v22.4s\n"
+      "fmul v28.4s, v28.4s, v0.s[0]\n"
+      "ldr s31, [x14, x21]\n"
+      "fadd v2.4s, v2.4s, v3.4s\n"
+      "ldr s23, [x14, x22]\n"
+      "fadd v27.4s, v27.4s, v18.4s\n"
+      "fmla v21.4s, v18.4s, v0.s[1]\n"
+      "fadd v29.4s, v12.4s, v26.4s\n"
+      "fadd v24.4s, v6.4s, v20.4s\n"
+      "fsub v16.4s, v12.4s, v26.4s\n"
+      "mov v6.16b, v20.16b\n"
+      "fadd v25.4s, v30.4s, v31.4s\n"
+      "fsub v22.4s, v30.4s, v31.4s\n"
+      "fadd v20.4s, v14.4s, v28.4s\n"
+      "mov v3.16b, v14.16b\n"
+      "fadd v24.4s, v24.4s, v29.4s\n"
+      "fmla v6.4s, v29.4s, v0.s[1]\n"
+      "fmul v16.4s, v16.4s, v0.s[0]\n"
+      "ldr s26, [x14, x23]\n"
+      "fmla v3.4s, v28.4s, v0.s[1]\n"
+      "fadd v14.4s, v23.4s, v26.4s\n"
+      "fadd v29.4s, v10.4s, v25.4s\n"
+      "fsub v23.4s, v23.4s, v26.4s\n"
+      "mov v10.16b, v25.16b\n"
+      "fadd v31.4s, v11.4s, v27.4s\n"
+      "fsub v12.4s, v11.4s, v27.4s\n"
+      "ldr s18, [x14, x24]\n"
+      "fadd v3.4s, v3.4s, v9.4s\n"
+      "ldr s19, [x27]\n"
+      "fadd v29.4s, v29.4s, v14.4s\n"
+      "fmul v23.4s, v23.4s, v0.s[0]\n"
+      "fmla v10.4s, v14.4s, v0.s[1]\n"
+      "fadd v25.4s, v7.4s, v31.4s\n"
+      "mov v26.16b, v31.16b\n"
+      "fadd v31.4s, v15.4s, v20.4s\n"
+      "fsub v11.4s, v15.4s, v20.4s\n"
+      "fadd v30.4s, v13.4s, v21.4s\n"
+      "fsub v9.4s, v13.4s, v21.4s\n"
+      "fadd v28.4s, v24.4s, v29.4s\n"
+      "fsub v24.4s, v24.4s, v29.4s\n"
+      "ldr s27, [x27, %[in_col_stride1]]\n"
+      "fadd v15.4s, v8.4s, v31.4s\n"
+      "mov v14.16b, v31.16b\n"
+      "fadd v13.4s, v4.4s, v30.4s\n"
+      "mov v4.16b, v30.16b\n"
+      "fadd v25.4s, v25.4s, v28.4s\n"
+      "fmla v26.4s, v28.4s, v0.s[1]\n"
+      "fmul v24.4s, v24.4s, v0.s[0]\n"
+      "fadd v21.4s, v6.4s, v10.4s\n"
+      "fsub v10.4s, v6.4s, v10.4s\n"
+      "fadd v6.4s, v2.4s, v3.4s\n"
+      "fsub v2.4s, v2.4s, v3.4s\n"
+      "ldr s29, [x27, x21]\n"
+      "str s25, [%[outptr0]]\n"  // Store output (0, 0)
+      "fadd v20.4s, v17.4s, v16.4s\n"
+      "str s26, [x28]\n"  // Store output (2, 0)
+      "mov v7.16b, v17.16b\n"
+      "fadd v17.4s, v22.4s, v23.4s\n"
+      "mov v8.16b, v22.16b\n"
+      "fadd v13.4s, v13.4s, v21.4s\n"
+      "fmul v10.4s, v10.4s, v0.s[0]\n"
+      "fmla v7.4s, v16.4s, v0.s[1]\n"
+      "ldr s28, [x27, x22]\n"
+      "fmla v8.4s, v23.4s, v0.s[1]\n"
+      "ldr s23, [x27, x23]\n"
+      "fmla v4.4s, v21.4s, v0.s[1]\n"
+      "ldr s25, [x27, x24]\n"
+      "str s13, [%[outptr0], x15]\n"  // Store output (0, 2)
+      "fadd v16.4s, v27.4s, v29.4s\n"
+      "fadd v7.4s, v7.4s, v5.4s\n"
+      "fadd v30.4s, v28.4s, v23.4s\n"
+      "fadd v8.4s, v8.4s, v18.4s\n"
+      "fsub v29.4s, v27.4s, v29.4s\n"
+      "str s4, [x28, x15]\n"  // Store output (2, 2)
+      "fsub v27.4s, v28.4s, v23.4s\n"
+      "fadd v19.4s, v19.4s, v16.4s\n"
+      "mov v16.16b, v16.16b\n"
+      "fadd v21.4s, v12.4s, v24.4s\n"
+      "mov v26.16b, v12.16b\n"
+      "mov v5.16b, v29.16b\n"
+      "fadd v22.4s, v20.4s, v17.4s\n"
+      "fmul v27.4s, v27.4s, v0.s[0]\n"
+      "fmla v16.4s, v30.4s, v0.s[1]\n"
+      "fadd v19.4s, v19.4s, v30.4s\n"
+      "fmla v26.4s, v24.4s, v0.s[1]\n"
+      "str s21, [x17]\n"  // Store output (1, 0)
+      "fsub v20.4s, v20.4s, v17.4s\n"
+      "fadd v15.4s, v15.4s, v22.4s\n"
+      "fmla v14.4s, v22.4s, v0.s[1]\n"
+      "fadd v18.4s, v29.4s, v27.4s\n"
+      "fmla v5.4s, v27.4s, v0.s[1]\n"
+      "fadd v26.4s, v26.4s, v19.4s\n"
+      "mov v28.16b, v11.16b\n"
+      "fmul v20.4s, v20.4s, v0.s[0]\n"
+      "fadd v13.4s, v9.4s, v10.4s\n"
+      "str s15, [%[outptr0], %[output_col_stride1]]\n"  // Store output (0, 1)
+      "mov v15.16b, v9.16b\n"
+      "str s14, [x28, %[output_col_stride1]]\n"  // Store output (2, 1)
+      "fadd v5.4s, v5.4s, v25.4s\n"
+      "str s26, [x18]\n"  // Store output (3, 0)
+      "fadd v30.4s, v7.4s, v8.4s\n"
+      "str s13, [x17, x15]\n"  // Store output (1, 2)
+      "fadd v12.4s, v11.4s, v20.4s\n"
+      "fmla v28.4s, v20.4s, v0.s[1]\n"
+      "fmla v15.4s, v10.4s, v0.s[1]\n"
+      "fadd v1.4s, v1.4s, v6.4s\n"
+      "fsub v8.4s, v7.4s, v8.4s\n"
+      "mov v6.16b, v6.16b\n"
+      "mov v9.16b, v2.16b\n"
+      "str s12, [x17, %[output_col_stride1]]\n"  // Store output (1, 1)
+      "fadd v28.4s, v28.4s, v18.4s\n"
+      "fadd v15.4s, v15.4s, v16.4s\n"
+      "fadd v1.4s, v1.4s, v30.4s\n"
+      "fmul v8.4s, v8.4s, v0.s[0]\n"
+      "fmla v6.4s, v30.4s, v0.s[1]\n"
+      "str s28, [x18, %[output_col_stride1]]\n"  // Store output (3, 1)
+      "str s1, [%[outptr0], x16]\n"  // Store output (0, 3)
+      "str s6, [x28, x16]\n"  // Store output (2, 3)
+      "fadd v2.4s, v2.4s, v8.4s\n"
+      "str s15, [x18, x15]\n"  // Store output (3, 2)
+      "fmla v9.4s, v8.4s, v0.s[1]\n"
+      "str s2, [x17, x16]\n"  // Store output (1, 3)
+      "fadd v9.4s, v9.4s, v5.4s\n"
+      "str s9, [x18, x16]\n"  // Store output (3, 3)
+      "6:\n"  // End
+      : [outptr0] "+r" (output), [inptr0] "+r" (inptr)
+      : [output_col_stride1] "r" (output_col_stride * sizeof(float)), [pcoeffs] "r" (coeffs), [n_channels] "r" ((long) n_channels), [in_row_stride] "r" (6 * matrix_stride * sizeof(float)), [in_col_stride1] "r" (matrix_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float))
+      : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+    );
+  }
+}
+
+#else
+
+template <>
+void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile(
+  const int n_channels,
+  const float* inptr,
+  const int matrix_stride,
+  const float* bptr,
+  float* const output,
+  const int output_row_stride,
+  const int output_col_stride
+)
+{
+  // Construct a map to the output cells
+  float *outptrs[output_tile_rows][output_tile_cols];
+  for (int i = 0; i < output_tile_rows; i++)
+  {
+    for (int j = 0; j < output_tile_cols; j++)
+    {
+      outptrs[i][j] = output + i*output_row_stride + j*output_col_stride;
+    }
+  }
+
+  // For each channel of the output
+  int channels_remaining = n_channels;
+#ifdef __arm__
+  for (; channels_remaining >= 2; channels_remaining -= 2)
+  {
+    // Matrices used and computed during this transform
+    float32x2_t F[6][6], FZ[6][4], f[4][4], b;
+
+    // Read a 6x6 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 6; i++)
+    {
+      for (int j = 0; j < 6; j++, m++)
+      {
+        F[i][j] = vld1_f32(inptr + m*matrix_stride);
+      }
+    }
+    inptr += 2;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 6; i++)
+    {
+      // FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
+      FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]);
+
+      // FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
+      FZ[i][1] = vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 2.0f);
+
+      // FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
+      FZ[i][2] = vmla_n_f32(vadd_f32(F[i][1], F[i][2]), vadd_f32(F[i][3], F[i][4]), 4.0f);
+
+      // FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
+      FZ[i][3] = vadd_f32(vmla_n_f32(vsub_f32(F[i][1], F[i][2]), vsub_f32(F[i][3], F[i][4]), 8.0f), F[i][5]);
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 4; j++)
+    {
+      // f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
+      f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]);
+
+      // f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
+      f[1][j] = vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 2.0f);
+
+      // f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
+      f[2][j] = vmla_n_f32(vadd_f32(FZ[1][j], FZ[2][j]), vadd_f32(FZ[3][j], FZ[4][j]), 4.0f);
+
+      // f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
+      f[3][j] = vadd_f32(vmla_n_f32(vsub_f32(FZ[1][j], FZ[2][j]), vsub_f32(FZ[3][j], FZ[4][j]), 8.0f), FZ[5][j]);
+    }
+
+    // Write out the output tile
+    if (bptr != nullptr)
+    {
+      b = vld1_f32(bptr);
+      bptr += 2;
+    }
+    else
+    {
+      b = vdup_n_f32(0.0f);
+    }
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        vst1_f32(outptrs[i][j], vadd_f32(f[i][j], b));
+        outptrs[i][j] += 2;
+      }
+    }
+  }
+#endif  // __arm__
+  for (; channels_remaining; channels_remaining--)
+  {
+    // Matrices used and computed during this transform
+    float F[6][6], FZ[6][4], f[4][4], b;
+
+    // Read a 6x6 tile in the Winograd domain
+    for (int i = 0, m = 0; i < 6; i++)
+    {
+      for (int j = 0; j < 6; j++, m++)
+      {
+        F[i][j] = *(inptr + m*matrix_stride);
+      }
+    }
+    inptr++;
+
+    // Compute the matrix F Z
+    for (int i = 0; i < 6; i++)
+    {
+      FZ[i][0] =  1*F[i][0] +  1*F[i][1] +  1*F[i][2] +  1*F[i][3] +  1*F[i][4];
+      FZ[i][1] =  1*F[i][1] + -1*F[i][2] +  2*F[i][3] + -2*F[i][4];
+      FZ[i][2] =  1*F[i][1] +  1*F[i][2] +  4*F[i][3] +  4*F[i][4];
+      FZ[i][3] =  1*F[i][1] + -1*F[i][2] +  8*F[i][3] + -8*F[i][4] +  1*F[i][5];
+    }
+
+    // Compute the output tile f = ZT F Z
+    for (int j = 0; j < 4; j++)
+    {
+      f[0][j] =  1*FZ[0][j] +  1*FZ[1][j] +  1*FZ[2][j] +  1*FZ[3][j] +  1*FZ[4][j];
+      f[1][j] =  1*FZ[1][j] + -1*FZ[2][j] +  2*FZ[3][j] + -2*FZ[4][j];
+      f[2][j] =  1*FZ[1][j] +  1*FZ[2][j] +  4*FZ[3][j] +  4*FZ[4][j];
+      f[3][j] =  1*FZ[1][j] + -1*FZ[2][j] +  8*FZ[3][j] + -8*FZ[4][j] +  1*FZ[5][j];
+    }
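+
+    // Equivalently, the two loops above multiply by the 6x4 matrix Z
+    // (written out here only for illustration):
+    //
+    //         [ 1   0   0   0 ]
+    //         [ 1   1   1   1 ]
+    //     Z = [ 1  -1   1  -1 ]
+    //         [ 1   2   4   8 ]
+    //         [ 1  -2   4  -8 ]
+    //         [ 0   0   0   1 ]
+    //
+    // giving FZ = F Z and the output tile f = Z^T F Z.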
+
+    // Write out the output tile
+    if (bptr != nullptr)
+    {
+      b = *(bptr++);
+    }
+    else
+    {
+      b = 0.0f;
+    }
+    for (int i = 0; i < output_tile_rows; i++)
+    {
+      for (int j = 0; j < output_tile_cols; j++)
+      {
+        *(outptrs[i][j]++) = f[i][j] + b;
+      }
+    }
+  }
+}
+
+#endif
+
+template class OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>;
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
similarity index 74%
rename from src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
rename to src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
index 58bed71..ce921ce 100644
--- a/src/core/NEON/kernels/convolution/winograd/transforms/output_6_3_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,42 +22,29 @@
  * SOFTWARE.
  */
 
-#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "output.hpp"
+#include "arm.hpp"
 
-namespace
+namespace winograd
 {
 
-template <bool Specialized, int PadRight=0>
-void winograd_output_transform_6_3_fp32_process_tile(
+template <>
+void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transform_tile(
   const int n_channels,
-  const float* const matrix_base,
+  const float* inptr,
   const int matrix_stride,
-  const float* const biases,
+  const float* bptr,
   float* const output,
-  const int output_row_stride,
-  const int output_col_stride,
-  const int _pad_bottom,
-  const int _pad_right
+  const int,  // No need to stride across rows
+  const int output_col_stride
 )
 {
-  (void) output_row_stride;
-  (void) _pad_bottom;
-  constexpr int output_tile_cols = 6;
-  constexpr int inner_tile_cols = 8;
-
-  const int pad_right = Specialized ? PadRight : _pad_right;
-  const int cells_j = output_tile_cols - pad_right;
-
   // Construct a map to the output cells
-  float *outptrs[cells_j];
-  for (int j = 0; j < cells_j; j++)
+  float *outptrs[output_tile_cols];
+  for (int j = 0; j < output_tile_cols; j++)
   {
     outptrs[j] = output + j*output_col_stride;
   }
-  const float *inptr = matrix_base;
-  const float *bptr = biases;
 
   // For each channel of the output
   int channels_remaining = n_channels;
@@ -87,7 +74,7 @@
       b = vld1q_f32(bptr);
       bptr += 4;
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       vst1q_f32(outptrs[j], f[j] + b);
       outptrs[j] += 4;
@@ -118,7 +105,7 @@
       b = vld1_f32(bptr);
       bptr += 2;
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       vst1_f32(outptrs[j], f[j] + b);
       outptrs[j] += 2;
@@ -149,31 +136,14 @@
     {
       b = *(bptr++);
     }
-    for (int j = 0; j < cells_j; j++)
+    for (int j = 0; j < output_tile_cols; j++)
     {
       *(outptrs[j]++) = f[j] + b;
     }
   }
 }
 
-}  // namespace (anonymous)
+template class OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>;
+template class OutputTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>;
 
-namespace winograd
-{
-using Tiles = OutputTransformImplTiles<1, 3, 1, 8, float>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_unpadded = winograd_output_transform_6_3_fp32_process_tile<true>;
-
-template <>
-const Tiles::TileFn Tiles::tilefn_right_padded[n_pad_right] = {
-  winograd_output_transform_6_3_fp32_process_tile<true, 1>,
-  winograd_output_transform_6_3_fp32_process_tile<true, 2>,
-  winograd_output_transform_6_3_fp32_process_tile<true, 3>,
-  winograd_output_transform_6_3_fp32_process_tile<true, 4>,
-  winograd_output_transform_6_3_fp32_process_tile<true, 5>,
-};
-
-template class OutputTransform<1, 3, 1, 8, float>;
-template class OutputTransform<3, 1, 8, 1, float>;
-}  // namespace winograd
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..37ae43f
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::execute(
+  const int n_output_channels,
+  const int n_input_channels,
+  const float* const input,  // NOTE: Data in HWIO order
+  float* const output,
+  const int matrix_stride,
+  const int matrix_row_stride
+)
+{
+  // Get pointers to each cell of the weight tensor
+  const auto weight_col_stride = n_input_channels * n_output_channels;
+  const float *inptrs[kernel_cols];
+  for (int j = 0; j < kernel_cols; j++)
+  {
+    inptrs[j] = input + j*weight_col_stride;
+  }
+
+  // For each input channel
+  for (int ic = 0; ic < n_input_channels; ic++)
+  {
+    float *outptr = output + ic * matrix_row_stride;
+
+    // For each output channel
+    int channels_remaining = n_output_channels;
+    for (; channels_remaining; channels_remaining--)
+    {
+      // Matrices used and computed in this kernel
+      float w[kernel_cols], V[inner_tile_cols];
+
+      // Read weights
+      for (int j = 0; j < kernel_cols; j++)
+      {
+        w[j] = *(inptrs[j]++);
+      }
+
+      // Compute V = w WT
+      V[0] = (w[0]*-1) / 36.0f;
+      V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
+      V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
+      V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
+      V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
+      V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
+      V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
+      V[7] = (w[6]*1) / 1.0f;
+
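+      // A sketch of where these constants come from (the Cook-Toom /
+      // Winograd construction, hence WinogradRoots::Integers): viewing the
+      // kernel as the polynomial w(x) = w[0] + w[1]*x + ... + w[6]*x^6,
+      // each V[k] is, up to sign and a fixed divisor, w evaluated at one of
+      // the interpolation points {0, -1, 1, -2, 2, -3, 3, infinity}:
+      //   V[0] = -w(0)/36     V[1] = w(-1)/48     V[2] = w(1)/48
+      //   V[3] = -w(-2)/120   V[4] = -w(2)/120    V[5] = w(-3)/720
+      //   V[6] = w(3)/720     V[7] = w[6], the leading coefficient.
+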
+      // Store the transformed weights
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        *(outptr + j*matrix_stride) = V[j];
+      }
+      outptr++;
+    }
+  }
+}
+
+template class WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>;
+template class WeightTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..8fab6db
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::execute(
+  const int n_output_channels,
+  const int n_input_channels,
+  const float* const input,
+  float* const output,
+  const int matrix_stride,
+  const int matrix_row_stride
+)
+{
+  constexpr int inner_tile_i = 4;
+  constexpr int inner_tile_j = 4;
+
+  // Get pointers to each cell of the weight tensor
+  const auto weight_col_stride = n_input_channels * n_output_channels;
+  const auto weight_row_stride = 3 * weight_col_stride;
+  const float *inptrs[3][3];
+  for (int i = 0; i < 3; i++)
+  {
+    for (int j = 0; j < 3; j++)
+    {
+      inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
+    }
+  }
+
+  // For each input channel
+  for (int ic = 0; ic < n_input_channels; ic++)
+  {
+    float *outptr = output + ic * matrix_row_stride;
+
+    // For each output channel
+    int channels_remaining = n_output_channels;
+#ifdef __aarch64__
+    for (; channels_remaining >= 4; channels_remaining -= 4)
+    {
+      // Matrices used and computed in this kernel
+      float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
+
+      // Read weights
+      for (int i = 0; i < 3; i++)
+      {
+        for (int j = 0; j < 3; j++)
+        {
+          w[i][j] = vld1q_f32(inptrs[i][j]);
+          inptrs[i][j] += 4;
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 3; j++)
+      {
+        Ww[0][j] = w[0][j];
+
+        // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
+        Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+        // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
+        Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+        Ww[3][j] = w[2][j];
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < inner_tile_i; i++)
+      {
+        V[i][0] = Ww[i][0];
+
+        // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
+        V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+        // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
+        V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+        V[i][3] = Ww[i][2];
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < inner_tile_i; i++)
+      {
+        for (int j = 0; j < inner_tile_j; j++, m++)
+        {
+          vst1q_f32(outptr + m*matrix_stride, V[i][j]);
+        }
+      }
+      outptr += 4;
+    }
+#endif  // __aarch64__
+#ifdef __arm_any__
+    for (; channels_remaining >= 2; channels_remaining -= 2)
+    {
+      // Matrices used and computed in this kernel
+      float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
+
+      // Read weights
+      for (int i = 0; i < 3; i++)
+      {
+        for (int j = 0; j < 3; j++)
+        {
+          w[i][j] = vld1_f32(inptrs[i][j]);
+          inptrs[i][j] += 2;
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 3; j++)
+      {
+        Ww[0][j] = w[0][j];
+
+        // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
+        Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+        // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
+        Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f);
+
+        Ww[3][j] = w[2][j];
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < inner_tile_i; i++)
+      {
+        V[i][0] = Ww[i][0];
+
+        // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
+        V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+        // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
+        V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f);
+
+        V[i][3] = Ww[i][2];
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < inner_tile_i; i++)
+      {
+        for (int j = 0; j < inner_tile_j; j++, m++)
+        {
+          vst1_f32(outptr + m*matrix_stride, V[i][j]);
+        }
+      }
+      outptr += 2;
+    }
+#endif  // __arm_any__
+    for (; channels_remaining; channels_remaining--)
+    {
+      // Matrices used and computed in this kernel
+      float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j];
+
+      // Read weights
+      for (int i = 0; i < 3; i++)
+      {
+        for (int j = 0; j < 3; j++)
+        {
+          w[i][j] = *(inptrs[i][j]++);
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 3; j++)
+      {
+        Ww[0][j] = w[0][j];
+        Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]);
+        Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]);
+        Ww[3][j] = w[2][j];
+      }
+
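+      // For reference, this loop and the next implement the classical
+      // F(2x2, 3x3) weight transform V = G w G^T with
+      //
+      //         [  1     0     0  ]
+      //     G = [ 1/2   1/2   1/2 ]
+      //         [ 1/2  -1/2   1/2 ]
+      //         [  0     0     1  ]
+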
+      // Compute V = W w WT
+      for (int i = 0; i < inner_tile_i; i++)
+      {
+        V[i][0] = Ww[i][0];
+        V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]);
+        V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]);
+        V[i][3] = Ww[i][2];
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < inner_tile_i; i++)
+      {
+        for (int j = 0; j < inner_tile_j; j++, m++)
+        {
+          *(outptr + m*matrix_stride) = V[i][j];
+        }
+      }
+      outptr++;
+    }
+  }
+}
+
+template class WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..79f4fa3
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::execute(
+  const int n_output_channels,
+  const int n_input_channels,
+  const float* const input,
+  float* const output,
+  const int matrix_stride,
+  const int matrix_row_stride
+)
+{
+  // Get pointers to each cell of the weight tensor
+  const auto weight_col_stride = n_input_channels * n_output_channels;
+  const auto weight_row_stride = 5 * weight_col_stride;
+  const float *inptrs[5][5];
+  for (int i = 0; i < 5; i++)
+  {
+    for (int j = 0; j < 5; j++)
+    {
+      inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
+    }
+  }
+
+  // For each input channel
+  for (int ic = 0; ic < n_input_channels; ic++)
+  {
+    float *outptr = output + ic * matrix_row_stride;
+
+    // For each output channel
+    int channels_remaining = n_output_channels;
+#ifdef __aarch64__
+    for (; channels_remaining >= 4; channels_remaining -= 4)
+    {
+      // Matrices used and computed in this kernel
+      float32x4_t w[5][5], Ww[6][5], V[6][6];
+
+      // Read weights
+      for (int i = 0; i < 5; i++)
+      {
+        for (int j = 0; j < 5; j++)
+        {
+          w[i][j] = vld1q_f32(inptrs[i][j]);
+          inptrs[i][j] += 4;
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 5; j++)
+      {
+        // Ww[0][j] = w[0][j]/4.0f;
+        Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);
+
+        // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
+        Ww[1][j] = vmulq_n_f32(
+          vaddq_f32(
+            vaddq_f32(
+              vaddq_f32(w[1][j], w[0][j]),
+              vaddq_f32(w[3][j], w[2][j])
+            ),
+            w[4][j]
+          ),
+          -1.0f/6.0f
+        );
+
+        // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
+        // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
+        Ww[2][j] = vmulq_n_f32(
+          vsubq_f32(
+            vaddq_f32(
+              vsubq_f32(w[1][j], w[0][j]),
+              vsubq_f32(w[3][j], w[2][j])
+            ),
+            w[4][j]
+          ),
+          1.0f/6.0f
+        );
+
+        // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
+        Ww[3][j] = vmulq_n_f32(
+          vmlaq_n_f32(
+            vaddq_f32(
+              vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
+              vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+            ),
+            w[4][j], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
+        Ww[4][j] = vmulq_n_f32(
+          vmlaq_n_f32(
+            vaddq_f32(
+              vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
+              vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+            ),
+            w[4][j], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // Ww[5][j] = w[4][j];
+        Ww[5][j] = w[4][j];
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < 6; i++)
+      {
+        // V[i][0] = Ww[i][0]/4.0f;
+        V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);
+
+        // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
+        V[i][1] = vmulq_n_f32(
+          vaddq_f32(
+            vaddq_f32(
+              vaddq_f32(Ww[i][1], Ww[i][0]),
+              vaddq_f32(Ww[i][3], Ww[i][2])
+            ),
+            Ww[i][4]
+          ),
+          -1.0f/6.0f
+        );
+
+        // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
+        // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
+        V[i][2] = vmulq_n_f32(
+          vsubq_f32(
+            vaddq_f32(
+              vsubq_f32(Ww[i][1], Ww[i][0]),
+              vsubq_f32(Ww[i][3], Ww[i][2])
+            ),
+            Ww[i][4]
+          ),
+          1.0f/6.0f
+        );
+
+        // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
+        V[i][3] = vmulq_n_f32(
+          vmlaq_n_f32(
+            vaddq_f32(
+              vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
+              vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+            ),
+            Ww[i][4], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
+        V[i][4] = vmulq_n_f32(
+          vmlaq_n_f32(
+            vaddq_f32(
+              vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
+              vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+            ),
+            Ww[i][4], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // V[i][5] = Ww[i][4];
+        V[i][5] = Ww[i][4];
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < 6; i++)
+      {
+        for (int j = 0; j < 6; j++, m++)
+        {
+          vst1q_f32(outptr + m*matrix_stride, V[i][j]);
+        }
+      }
+      outptr += 4;
+    }
+#endif  // __aarch64__
+#ifdef __arm_any__
+    for (; channels_remaining >= 2; channels_remaining -= 2)
+    {
+      // Matrices used and computed in this kernel
+      float32x2_t w[5][5], Ww[6][5], V[6][6];
+
+      // Read weights
+      for (int i = 0; i < 5; i++)
+      {
+        for (int j = 0; j < 5; j++)
+        {
+          w[i][j] = vld1_f32(inptrs[i][j]);
+          inptrs[i][j] += 2;
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 5; j++)
+      {
+        // Ww[0][j] = w[0][j]/4.0f;
+        Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);
+
+        // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
+        Ww[1][j] = vmul_n_f32(
+          vadd_f32(
+            vadd_f32(
+              vadd_f32(w[1][j], w[0][j]),
+              vadd_f32(w[3][j], w[2][j])
+            ),
+            w[4][j]
+          ),
+          -1.0f/6.0f
+        );
+
+        // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
+        // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
+        Ww[2][j] = vmul_n_f32(
+          vsub_f32(
+            vadd_f32(
+              vsub_f32(w[1][j], w[0][j]),
+              vsub_f32(w[3][j], w[2][j])
+            ),
+            w[4][j]
+          ),
+          1.0f/6.0f
+        );
+
+        // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
+        Ww[3][j] = vmul_n_f32(
+          vmla_n_f32(
+            vadd_f32(
+              vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
+              vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+            ),
+            w[4][j], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
+        Ww[4][j] = vmul_n_f32(
+          vmla_n_f32(
+            vadd_f32(
+              vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
+              vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
+            ),
+            w[4][j], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // Ww[5][j] = w[4][j];
+        Ww[5][j] = w[4][j];
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < 6; i++)
+      {
+        // V[i][0] = Ww[i][0]/4.0f;
+        V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);
+
+        // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
+        V[i][1] = vmul_n_f32(
+          vadd_f32(
+            vadd_f32(
+              vadd_f32(Ww[i][1], Ww[i][0]),
+              vadd_f32(Ww[i][3], Ww[i][2])
+            ),
+            Ww[i][4]
+          ),
+          -1.0f/6.0f
+        );
+
+        // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
+        // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
+        V[i][2] = vmul_n_f32(
+          vsub_f32(
+            vadd_f32(
+              vsub_f32(Ww[i][1], Ww[i][0]),
+              vsub_f32(Ww[i][3], Ww[i][2])
+            ),
+            Ww[i][4]
+          ),
+          1.0f/6.0f
+        );
+
+        // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
+        V[i][3] = vmul_n_f32(
+          vmla_n_f32(
+            vadd_f32(
+              vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
+              vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+            ),
+            Ww[i][4], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
+        V[i][4] = vmul_n_f32(
+          vmla_n_f32(
+            vadd_f32(
+              vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
+              vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
+            ),
+            Ww[i][4], 2.0f
+          ),
+          1.0f/3.0f
+        );
+
+        // V[i][5] = Ww[i][4];
+        V[i][5] = Ww[i][4];
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < 6; i++)
+      {
+        for (int j = 0; j < 6; j++, m++)
+        {
+          vst1_f32(outptr + m*matrix_stride, V[i][j]);
+        }
+      }
+      outptr += 2;
+    }
+#endif  // __arm_any__
+    for (; channels_remaining; channels_remaining--)
+    {
+      // Matrices used and computed in this kernel
+      float w[5][5], Ww[6][5], V[6][6];
+
+      // Read weights
+      for (int i = 0; i < 5; i++)
+      {
+        for (int j = 0; j < 5; j++)
+        {
+          w[i][j] = *(inptrs[i][j]++);
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 5; j++)
+      {
+        Ww[0][j] = w[0][j]/4.0f;
+        Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
+        Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
+        Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
+        Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
+        Ww[5][j] = w[4][j];
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < 6; i++)
+      {
+        V[i][0] = Ww[i][0]/4.0f;
+        V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
+        V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
+        V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
+        V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
+        V[i][5] = Ww[i][4];
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < 6; i++)
+      {
+        for (int j = 0; j < 6; j++, m++)
+        {
+          *(outptr + m*matrix_stride) = V[i][j];
+        }
+      }
+      outptr++;
+    }
+  }
+}
+
+template class WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
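All three channel loops in the kernel above (the 4-wide NEON path on AArch64, the 2-wide path guarded by __arm_any__, and the scalar tail) evaluate the same two-sided product V = G w G^T for the 5x5 kernel / 6x6 inner tile (i.e. F(2x2, 5x5)); they differ only in how many channels they carry per iteration. As a reading aid, a minimal scalar sketch follows, with the 6x5 matrix G written out from the row expressions used in the kernel. The names G and reference_weight_transform_5x5 are illustrative only, not part of the library.

// Scalar reference for the 5x5 -> 6x6 weight transform: V = G w G^T.
static const float G[6][5] = {
  {  1.0f/4,        0,       0,       0,      0 },
  { -1.0f/6,  -1.0f/6, -1.0f/6, -1.0f/6, -1.0f/6 },
  { -1.0f/6,   1.0f/6, -1.0f/6,  1.0f/6, -1.0f/6 },
  { 1.0f/24,  1.0f/12,  1.0f/6,  1.0f/3,  2.0f/3 },
  { 1.0f/24, -1.0f/12,  1.0f/6, -1.0f/3,  2.0f/3 },
  {       0,        0,       0,       0,    1.0f },
};

void reference_weight_transform_5x5(const float w[5][5], float V[6][6])
{
  float Ww[6][5];  // Ww = G * w
  for (int i = 0; i < 6; i++)
  {
    for (int j = 0; j < 5; j++)
    {
      Ww[i][j] = 0.0f;
      for (int k = 0; k < 5; k++) { Ww[i][j] += G[i][k] * w[k][j]; }
    }
  }
  // V = (G * w) * G^T -- note the transposed access G[j][k]
  for (int i = 0; i < 6; i++)
  {
    for (int j = 0; j < 6; j++)
    {
      V[i][j] = 0.0f;
      for (int k = 0; k < 5; k++) { V[i][j] += Ww[i][k] * G[j][k]; }
    }
  }
}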
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..fb3d712
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::execute(
+  const int n_output_channels,
+  const int n_input_channels,
+  const float* const input,  // NOTE: Data in HWIO order
+  float* const output,
+  const int matrix_stride,
+  const int matrix_row_stride
+)
+{
+  // Get pointers to each cell of the weight tensor
+  const auto weight_col_stride = n_input_channels * n_output_channels;
+  const float *inptrs[kernel_cols];
+  for (int j = 0; j < kernel_cols; j++)
+  {
+    inptrs[j] = input + j*weight_col_stride;
+  }
+
+  // For each input channel
+  for (int ic = 0; ic < n_input_channels; ic++)
+  {
+    float *outptr = output + ic * matrix_row_stride;
+
+    // For each output channel
+    int channels_remaining = n_output_channels;
+    for (; channels_remaining; channels_remaining--)
+    {
+      // Matrices used and computed in this kernel
+      float w[kernel_cols], V[inner_tile_cols];
+
+      // Read weights
+      for (int j = 0; j < kernel_cols; j++)
+      {
+        w[j] = *(inptrs[j]++);
+      }
+
+      // Compute V = w WT
+      V[0] = (w[0]*-1) / 36.0f;
+      V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48.0f;
+      V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48.0f;
+      V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120.0f;
+      V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
+      V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720.0f;
+      V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720.0f;
+      V[7] = (w[4]*1) / 1;
+
+      // Store the transformed weights
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        *(outptr + j*matrix_stride) = V[j];
+      }
+      outptr++;
+    }
+  }
+}
+
+template class WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>;
+template class WeightTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
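The straight-line coefficient code above is an 8x5 matrix product V = G w written out term by term, leaving the compiler to schedule it. A sketch of the equivalent matrix form follows; spelling G out as data is purely illustrative, and the names G_4_5 and reference_weight_transform_1d are not part of the library. (The column-kernel instantiation WeightTransform<5, 1, 8, 1, ...> at the bottom of the file presumably maps onto this row implementation elsewhere, e.g. in kernel.hpp.)

// Matrix form of the 1D F(4, 5) weight transform: V = G w.
static const float G_4_5[8][5] = {
  { -1.0f/36,          0,         0,          0,          0 },
  {  1.0f/48,   -1.0f/48,   1.0f/48,   -1.0f/48,    1.0f/48 },
  {  1.0f/48,    1.0f/48,   1.0f/48,    1.0f/48,    1.0f/48 },
  { -1.0f/120,  2.0f/120, -4.0f/120,  8.0f/120, -16.0f/120 },
  { -1.0f/120, -2.0f/120, -4.0f/120, -8.0f/120, -16.0f/120 },
  {  1.0f/720, -3.0f/720,  9.0f/720, -27.0f/720, 81.0f/720 },
  {  1.0f/720,  3.0f/720,  9.0f/720,  27.0f/720, 81.0f/720 },
  {         0,          0,         0,          0,      1.0f },
};

void reference_weight_transform_1d(const float w[5], float V[8])
{
  for (int i = 0; i < 8; i++)
  {
    V[i] = 0.0f;
    for (int k = 0; k < 5; k++) { V[i] += G_4_5[i][k] * w[k]; }
  }
}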
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..9e7040b
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>::execute(
+  const int n_output_channels,
+  const int n_input_channels,
+  const float* const input,  // NOTE: Data in HWIO order
+  float* const output,
+  const int matrix_stride,
+  const int matrix_row_stride
+)
+{
+  // Get pointers to each cell of the weight tensor
+  const auto weight_col_stride = n_input_channels * n_output_channels;
+  const auto weight_row_stride = 3 * weight_col_stride;
+  const float *inptrs[3][3];
+  for (int i = 0; i < 3; i++)
+  {
+    for (int j = 0; j < 3; j++)
+    {
+      inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
+    }
+  }
+
+  // For each input channel
+  for (int ic = 0; ic < n_input_channels; ic++)
+  {
+    float *outptr = output + ic * matrix_row_stride;
+
+    // For each output channel
+    int channels_remaining = n_output_channels;
+#ifdef __aarch64__
+    for (; channels_remaining >= 4; channels_remaining -= 4)
+    {
+      // Matrices used and computed in this kernel
+      float32x4_t w[3][3], Ww[6][3], V[6][6];
+
+      // Read weights
+      for (int i = 0; i < 3; i++)
+      {
+        for (int j = 0; j < 3; j++)
+        {
+          w[i][j] = vld1q_f32(inptrs[i][j]);
+          inptrs[i][j] += 4;
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 3; j++)
+      {
+        // Ww[0][j] =  6*w[0][j];
+        Ww[0][j] = vmulq_n_f32(w[0][j], 6.0f);
+
+        // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
+        Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0f);
+
+        // Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
+        Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0f);
+
+        // Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
+        Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+        // Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
+        Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+        // Ww[5][j] = 24*w[2][j];
+        Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f);
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < 6; i++)
+      {
+        const float recip576 = 1.0f / 576.0f;
+
+        // V[i][0] =  6*Ww[i][0];
+        V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0f), recip576);
+
+        // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
+        V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0f), recip576);
+
+        // V[i][2] = -4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2];
+        V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0f), recip576);
+
+        // V[i][3] =  1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2];
+        V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+        // V[i][4] =  1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2];
+        V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+        // V[i][5] = 24*Ww[i][2];
+        V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576);
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < 6; i++)
+      {
+        for (int j = 0; j < 6; j++, m++)
+        {
+          vst1q_f32(outptr + m*matrix_stride, V[i][j]);
+        }
+      }
+      outptr += 4;
+    }
+#endif  // __aarch64__
+#ifdef __arm_any__
+    for (; channels_remaining >= 2; channels_remaining -= 2)
+    {
+      // Matrices used and computed in this kernel
+      float32x2_t w[3][3], Ww[6][3], V[6][6];
+
+      // Read weights
+      for (int i = 0; i < 3; i++)
+      {
+        for (int j = 0; j < 3; j++)
+        {
+          w[i][j] = vld1_f32(inptrs[i][j]);
+          inptrs[i][j] += 2;
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 3; j++)
+      {
+        // Ww[0][j] =  6*w[0][j];
+        Ww[0][j] = vmul_n_f32(w[0][j], 6.0f);
+
+        // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
+        Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0f);
+
+        // Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
+        Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0f);
+
+        // Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
+        Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+        // Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
+        Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f);
+
+        // Ww[5][j] = 24*w[2][j];
+        Ww[5][j] = vmul_n_f32(w[2][j], 24.0f);
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < 6; i++)
+      {
+        const float recip576 = 1.0f / 576.0f;
+
+        // V[i][0] =  6*Ww[i][0];
+        V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0f), recip576);
+
+        // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2];
+        V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0f), recip576);
+
+        // V[i][2] = -4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2];
+        V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0f), recip576);
+
+        // V[i][3] =  1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2];
+        V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+        // V[i][4] =  1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2];
+        V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576);
+
+        // V[i][5] = 24*Ww[i][2];
+        V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576);
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < 6; i++)
+      {
+        for (int j = 0; j < 6; j++, m++)
+        {
+          vst1_f32(outptr + m*matrix_stride, V[i][j]);
+        }
+      }
+      outptr += 2;
+    }
+#endif  // __arm_any__
+    for (; channels_remaining; channels_remaining--)
+    {
+      // Matrices used and computed in this kernel
+      float w[3][3], Ww[6][3], V[6][6];
+
+      // Read weights
+      for (int i = 0; i < 3; i++)
+      {
+        for (int j = 0; j < 3; j++)
+        {
+          w[i][j] = *(inptrs[i][j]++);
+        }
+      }
+
+      // Compute the matrix W w
+      for (int j = 0; j < 3; j++)
+      {
+        Ww[0][j] =  6*w[0][j];
+        Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j];
+        Ww[2][j] = -4*w[0][j] +  4*w[1][j] + -4*w[2][j];
+        Ww[3][j] =  1*w[0][j] +  2*w[1][j] +  4*w[2][j];
+        Ww[4][j] =  1*w[0][j] + -2*w[1][j] +  4*w[2][j];
+        Ww[5][j] = 24*w[2][j];
+      }
+
+      // Compute V = W w WT
+      for (int i = 0; i < 6; i++)
+      {
+        V[i][0] = ( 6*Ww[i][0]) / 576.0f;
+        V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0f;
+        V[i][2] = (-4*Ww[i][0] +  4*Ww[i][1] + -4*Ww[i][2]) / 576.0f;
+        V[i][3] = ( 1*Ww[i][0] +  2*Ww[i][1] +  4*Ww[i][2]) / 576.0f;
+        V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] +  4*Ww[i][2]) / 576.0f;
+        V[i][5] = (24*Ww[i][2]) / 576.0f;
+      }
+
+      // Store the transformed weights
+      for (int i = 0, m = 0; i < 6; i++)
+      {
+        for (int j = 0; j < 6; j++, m++)
+        {
+          *(outptr + m*matrix_stride) = V[i][j];
+        }
+      }
+      outptr++;
+    }
+  }
+}
+
+template class WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
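In this kernel the rows used to build Ww ([6,0,0], [-4,-4,-4], [-4,4,-4], [1,2,4], [1,-2,4], [0,0,24]) are 24 times the familiar fractional G for F(4x4, 3x3), so the two-sided product picks up a factor of 24^2 = 576; the vector paths hoist the normalisation into recip576 and the scalar tail divides by 576 directly. A scalar sketch of the same computation follows, with illustrative names (G_4x4_3x3, reference_weight_transform_3x3) that are not part of the library.

// Scalar reference for the 3x3 -> 6x6 weight transform:
// V = (G w G^T) / 576, with the integer rows of G taken from the kernel.
static const float G_4x4_3x3[6][3] = {
  {  6,  0,  0 },
  { -4, -4, -4 },
  { -4,  4, -4 },
  {  1,  2,  4 },
  {  1, -2,  4 },
  {  0,  0, 24 },
};

void reference_weight_transform_3x3(const float w[3][3], float V[6][6])
{
  float Ww[6][3];  // Ww = G * w
  for (int i = 0; i < 6; i++)
  {
    for (int j = 0; j < 3; j++)
    {
      Ww[i][j] = 0.0f;
      for (int k = 0; k < 3; k++) { Ww[i][j] += G_4x4_3x3[i][k] * w[k][j]; }
    }
  }
  for (int i = 0; i < 6; i++)  // V = (G * w) * G^T, normalised once at the end
  {
    for (int j = 0; j < 6; j++)
    {
      float acc = 0.0f;
      for (int k = 0; k < 3; k++) { acc += Ww[i][k] * G_4x4_3x3[j][k]; }
      V[i][j] = acc / 576.0f;
    }
  }
}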
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp
new file mode 100644
index 0000000..4572348
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm.hpp"
+#include "kernel.hpp"
+
+namespace winograd
+{
+
+template <>
+void WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::execute(
+  const int n_output_channels,
+  const int n_input_channels,
+  const float* const input,  // NOTE: Data in HWIO order
+  float* const output,
+  const int matrix_stride,
+  const int matrix_row_stride
+)
+{
+  // Get pointers to each cell of the weight tensor
+  const auto weight_col_stride = n_input_channels * n_output_channels;
+  const float *inptrs[3];
+  for (int j = 0; j < 3; j++)
+  {
+    inptrs[j] = input + j*weight_col_stride;
+  }
+
+  // For each input channel
+  for (int ic = 0; ic < n_input_channels; ic++)
+  {
+    float *outptr = output + ic * matrix_row_stride;
+
+    // For each output channel
+    int channels_remaining = n_output_channels;
+    for (; channels_remaining; channels_remaining--)
+    {
+      // Matrices used and computed in this kernel
+      float w[3], V[inner_tile_cols];
+
+      // Read weights
+      for (int j = 0; j < 3; j++)
+      {
+        w[j] = *(inptrs[j]++);
+      }
+
+      // Compute V = w WT
+      V[0] = (w[0]*-1) / 36.0f;
+      V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;
+      V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f;
+      V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f;
+      V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f;
+      V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f;
+      V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;
+      V[7] = (w[2]*1) / 1;
+
+      // Store the transformed weights
+      for (int j = 0; j < inner_tile_cols; j++)
+      {
+        *(outptr + j*matrix_stride) = V[j];
+      }
+      outptr++;
+    }
+  }
+}
+
+template class WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>;
+template class WeightTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>;
+
+}  // namespace winograd
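The same divisors (36, 48, 120, 720) appear here as in the 5-tap transform earlier: both kernels sample the weight polynomial at the same integer points, which appears to be what WinogradRoots::Integers denotes. Concretely, writing p(x) = w[0] + w[1]*x + w[2]*x^2, the outputs above are scaled evaluations of p at 0, +/-1, +/-2, +/-3, with V[7] taking the leading coefficient (the point at infinity). A small self-checking sketch under that reading; all names in it are illustrative only.

#include <cassert>
#include <cmath>

// p(x) = w[0] + w[1]*x + w[2]*x^2, the polynomial carrying the 3 weights.
static float p(const float w[3], float x)
{
  return w[0] + w[1]*x + w[2]*x*x;
}

// Spot-check a few of the V[...] expressions from the kernel above against
// direct polynomial evaluation at the corresponding integer point.
void check_integer_roots(const float w[3])
{
  const float V1 = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f;   // kernel's V[1]
  assert(std::fabs(V1 - p(w, -1.0f)/48.0f) < 1e-6f);

  const float V3 = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f; // kernel's V[3]
  assert(std::fabs(V3 + p(w, -2.0f)/120.0f) < 1e-6f);     // note the sign

  const float V6 = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f;   // kernel's V[6]
  assert(std::fabs(V6 - p(w, 3.0f)/720.0f) < 1e-6f);
}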