COMPMID-1975: Update depthwise convolution.

Drop the embedded optimized convolver path from
NEDepthwiseConvolutionLayer3x3Kernel (is_optimized_execution_possible,
configure_optimized, run_optimized, create_convolver_object), leaving only
the generic NCHW implementation, and wrap the kernel sources in the
arm_compute namespace instead of using-directives. Add padding and qasymm8
helper sources under convolution/common, and rewrite the depthwise tile
kernels as hand-written AArch64 assembly with fused activation (None/ReLU)
variants.

Change-Id: Iad58672be35710a7ec2e918653d6d529709387e8
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/898
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
index 6071153..ec672e0 100644
--- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -40,11 +40,8 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "support/ToolchainSupport.h"
 
-using namespace arm_compute;
-using namespace arm_compute::detail;
-using namespace arm_compute::misc::shape_calculator;
-using namespace depthwise;
-
+namespace arm_compute
+{
 namespace
 {
 template <typename T1, typename T2, unsigned int stridex>
@@ -65,7 +62,7 @@
         const int          kernel_stride_z = weights->info()->strides_in_bytes().z();
         const int          output_w        = output->info()->dimension(0);
         const int          output_h        = output->info()->dimension(1);
-        const int          delta_input     = get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
+        const int          delta_input     = detail::get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
         const unsigned int conv_stride_y   = std::get<1>(conv_info.stride());
         const unsigned int conv_pad_x      = conv_info.pad_left();
         const unsigned int conv_pad_y      = conv_info.pad_top();
@@ -100,9 +97,9 @@
             const auto ptr_weights_r0 = reinterpret_cast<const T1 *>(ptr_weights_base);
             const auto ptr_weights_r1 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y);
             const auto ptr_weights_r2 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y * 2);
-            const auto vw_r0          = load_matrix_row(ptr_weights_r0, weights_offset);
-            const auto vw_r1          = load_matrix_row(ptr_weights_r1, weights_offset);
-            const auto vw_r2          = load_matrix_row(ptr_weights_r2, weights_offset);
+            const auto vw_r0          = detail::load_matrix_row(ptr_weights_r0, weights_offset);
+            const auto vw_r1          = detail::load_matrix_row(ptr_weights_r1, weights_offset);
+            const auto vw_r2          = detail::load_matrix_row(ptr_weights_r2, weights_offset);
 
             for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y)
             {
@@ -115,8 +112,8 @@
                     in_top += delta_input, in_mid += delta_input, in_low += delta_input,
                     p_out += num_elems_written_per_iteration)
                 {
-                    auto vres = convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, input_offset);
-                    store_results<stridex>(p_out, vres);
+                    auto vres = detail::convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, input_offset);
+                    detail::store_results<stridex>(p_out, vres);
                 }
             }
         },
@@ -145,7 +142,7 @@
     }
 }
 
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, bool is_optimized)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
@@ -156,15 +153,11 @@
     const unsigned int height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
 
     ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) != 3 || weights->dimension(height_idx) != 3);
-
-    if(!is_optimized)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3);
-    }
+    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3);
 
     if(output->total_size() != 0)
     {
-        const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
+        const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
 
         if(is_data_type_quantized_asymmetric(input->data_type()))
@@ -180,95 +173,61 @@
     return Status{};
 }
 
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, bool is_optimized,
-                                                        IDepthwiseConvolution *convolver = nullptr)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
     Window win;
     bool   window_changed = false;
 
-    if(is_optimized)
+    // Get convolved dimensions
+    const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
+    const DataType    output_dt    = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type();
+
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output, input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));
+
+    // Configure kernel window (generic)
+    const unsigned int conv_stride_x = conv_info.stride().first;
+    const unsigned int conv_stride_y = conv_info.stride().second;
+    const unsigned int conv_pad_top  = conv_info.pad_top();
+    const unsigned int conv_pad_left = conv_info.pad_left();
+
+    unsigned int num_elems_written_per_iteration = 16 >> conv_stride_x;
+    unsigned int num_elems_read_per_iteration    = 0;
+
+    switch(input->data_type())
     {
-        if(convolver != nullptr)
-        {
-            auto win_last = convolver->get_window();
-            win.set(Window::DimX, Window::Dimension(0, win_last, 1));
-
-            // Auto-configure output
-            bool        same_padding = conv_info.has_padding();
-            TensorShape output_shape{ input->tensor_shape() };
-
-            output_shape.set(1, convolver->output_size(output_shape.y(), same_padding)); // Set width
-            output_shape.set(2, convolver->output_size(output_shape.z(), same_padding)); // Set height
-
-            const DataType output_dt = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type();
-
-            // Output auto inizialitation if not yet initialized
-            auto_init_if_empty(*output, input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));
-
-            // Configure window (optimised)
-            // Set padding in channels
-            const int num_channels = weights->dimension(0);
-            if((num_channels >= 128) && (num_channels % 16 == 0))
-            {
-                input->extend_padding(PaddingSize(0, 4, 0, 0));
-                weights->extend_padding(PaddingSize(0, 4, 0, 0));
-                output->extend_padding(PaddingSize(0, 4, 0, 0));
-            }
-        }
-    }
-    else
-    {
-        // Get convolved dimensions
-        const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
-        const DataType    output_dt    = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type();
-
-        // Output auto inizialitation if not yet initialized
-        auto_init_if_empty(*output, input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));
-
-        // Configure kernel window (generic)
-        const unsigned int conv_stride_x = conv_info.stride().first;
-        const unsigned int conv_stride_y = conv_info.stride().second;
-        const unsigned int conv_pad_top  = conv_info.pad_top();
-        const unsigned int conv_pad_left = conv_info.pad_left();
-
-        unsigned int num_elems_written_per_iteration = 16 >> conv_stride_x;
-        unsigned int num_elems_read_per_iteration    = 0;
-
-        switch(input->data_type())
-        {
-            case DataType::QASYMM8:
-                num_elems_read_per_iteration = 16;
-                break;
+        case DataType::QASYMM8:
+            num_elems_read_per_iteration = 16;
+            break;
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-            case DataType::F16:
-                num_elems_read_per_iteration = 24;
-                break;
+        case DataType::F16:
+            num_elems_read_per_iteration = 24;
+            break;
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-            case DataType::F32:
-                num_elems_read_per_iteration = 12;
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Data type not supported.");
-        }
-
-        // Configure kernel window
-        win = calculate_max_window(*output, Steps(num_elems_written_per_iteration));
-
-        AccessWindowRectangle  input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3, conv_stride_x, conv_stride_y);
-        AccessWindowStatic     weights_access(weights, 0, 0, 3, 3);
-        AccessWindowHorizontal output_access(output, 0, num_elems_written_per_iteration);
-
-        window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
-        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+        case DataType::F32:
+            num_elems_read_per_iteration = 12;
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Data type not supported.");
     }
 
+    // Configure kernel window
+    win = calculate_max_window(*output, Steps(num_elems_written_per_iteration));
+
+    AccessWindowRectangle  input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3, conv_stride_x, conv_stride_y);
+    AccessWindowStatic     weights_access(weights, 0, 0, 3, 3);
+    AccessWindowHorizontal output_access(output, 0, num_elems_written_per_iteration);
+
+    window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
+    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+
     Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
     return std::make_pair(err, win);
 }
 } // namespace
 
 NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
-    : _border_size(0), _input(), _output(), _weights(), _conv_info(), _convolver(nullptr), _num_elems_written_per_iteration(0), _run_optimized(false), _depth_multiplier(1)
+    : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0), _depth_multiplier(1)
 {
 }
 
@@ -277,34 +236,28 @@
     return _border_size;
 }
 
-void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
-                                                     DataLayout data_layout)
+void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), output->info(), conv_info, depth_multiplier));
 
-    _input            = input;
-    _output           = output;
-    _weights          = weights;
-    _conv_info        = conv_info;
-    _depth_multiplier = depth_multiplier;
-    _convolver        = nullptr;
+    _input                           = input;
+    _output                          = output;
+    _weights                         = weights;
+    _conv_info                       = conv_info;
+    _depth_multiplier                = depth_multiplier;
+    _num_elems_written_per_iteration = 16 >> _conv_info.stride().first;
+    _border_size                     = BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left());
 
-    _run_optimized = NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(input->info()->tensor_shape(),
-                                                                                           conv_info,
-                                                                                           input->info()->data_type(), depth_multiplier,
-                                                                                           data_layout);
-
-    (_run_optimized) ? configure_optimized() : configure_generic();
+    auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier);
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+    INEKernel::configure(win_config.second);
 }
 
 Status NEDepthwiseConvolutionLayer3x3Kernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
-
-    bool is_optimized = NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(input->tensor_shape(), conv_info, input->data_type(), depth_multiplier, input->data_layout());
-
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier, is_optimized));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier, is_optimized).first);
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier).first);
     return Status{};
 }
 
@@ -313,80 +266,6 @@
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_UNUSED(info);
 
-    (_run_optimized) ? run_optimized(window, info) : run_generic(window, info);
-}
-
-bool NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, unsigned int depth_multiplier, DataLayout data_layout)
-{
-    // Reshape input shape if in NHWC format
-    TensorShape in_shape{ input_shape };
-    if(data_layout == DataLayout::NHWC)
-    {
-        in_shape.set(Window::DimX, input_shape.y());
-        in_shape.set(Window::DimY, input_shape.z());
-        in_shape.set(Window::DimZ, input_shape.x());
-    }
-
-    // Check supported data type
-    bool supported_datatype = is_data_type_float(dt) || is_data_type_quantized(dt);
-
-    // Check for supported strides
-    const auto &strides           = conv_info.stride();
-    bool        supported_strides = (strides.first == strides.second) && ((strides.first == 1) || (strides.first == 2));
-
-    // Check for supported padding
-    const auto    pad_top           = conv_info.pad_top();
-    const auto    pad_right         = conv_info.pad_right();
-    const auto    pad_bottom        = conv_info.pad_bottom();
-    const auto    pad_left          = conv_info.pad_left();
-    PadStrideInfo same_pad          = calculate_same_pad(in_shape, TensorShape(3U, 3U), conv_info);
-    bool          is_same_padding   = (pad_top == same_pad.pad_top()) && (pad_right == same_pad.pad_right()) && (pad_bottom == same_pad.pad_bottom()) && (pad_left == same_pad.pad_left());
-    bool          is_valid_padding  = (pad_top == 0) && (pad_right == 0) && (pad_bottom == 0) && (pad_left == 0);
-    bool          supported_padding = is_same_padding || is_valid_padding;
-
-    return supported_datatype && supported_strides && supported_padding && (depth_multiplier == 1);
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::generate_convolver()
-{
-    ARM_COMPUTE_ERROR_ON_CPU_F16_UNSUPPORTED(_input);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(_input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(_input, _weights);
-    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(1) != 3 || _weights->info()->dimension(2) != 3);
-
-    _convolver = create_convolver_object(_conv_info, _weights, _input, _output, true);
-    if(_convolver)
-    {
-        _convolver->set_offsets(-_input->info()->quantization_info().offset, -_weights->info()->quantization_info().offset);
-    }
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::configure_generic()
-{
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, _run_optimized));
-
-    _num_elems_written_per_iteration = 16 >> _conv_info.stride().first;
-    _border_size                     = BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left());
-
-    auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, false);
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    INEKernel::configure(win_config.second);
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::configure_optimized()
-{
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, _run_optimized));
-
-    _border_size = BorderSize(0, 0);
-    _convolver   = create_convolver_object(_conv_info, _weights, _input, _output);
-
-    auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, true, _convolver.get());
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    INEKernel::configure(win_config.second);
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::run_generic(const Window &window, const ThreadInfo &info)
-{
     ARM_COMPUTE_UNUSED(info);
 
     switch(_input->info()->data_type())
@@ -406,120 +285,4 @@
             ARM_COMPUTE_ERROR("Not implemented");
     }
 }
-
-void NEDepthwiseConvolutionLayer3x3Kernel::run_optimized(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON(!_convolver);
-
-    const size_t start = window.x().start();
-    const size_t end   = window.x().end();
-    _convolver->run(start, end);
-}
-
-std::unique_ptr<depthwise::IDepthwiseConvolution> NEDepthwiseConvolutionLayer3x3Kernel::create_convolver_object(PadStrideInfo  conv_info,
-                                                                                                                const ITensor *w,
-                                                                                                                const ITensor *in,
-                                                                                                                ITensor       *out,
-                                                                                                                bool           setup_strides)
-{
-    const DataType    dt                  = in->info()->data_type();
-    const TensorShape shape               = in->info()->tensor_shape();
-    const int         in_rows             = shape.z();
-    const int         in_cols             = shape.y();
-    const int         n_batches           = shape[3];
-    const int         n_channels          = shape.x();
-    const bool        padding_same        = conv_info.has_padding();
-    const int         weight_col_stride   = (setup_strides) ? w->info()->strides_in_bytes().y() / w->info()->element_size() : 0;
-    const int         weight_row_stride   = (setup_strides) ? w->info()->strides_in_bytes().z() / w->info()->element_size() : 0;
-    const int         input_col_stride    = (setup_strides) ? in->info()->strides_in_bytes().y() / in->info()->element_size() : 0;
-    const int         input_row_stride    = (setup_strides) ? in->info()->strides_in_bytes().z() / in->info()->element_size() : 0;
-    const int         input_batch_stride  = (setup_strides) ? in->info()->strides_in_bytes()[3] / in->info()->element_size() : 0;
-    const int         output_col_stride   = (setup_strides) ? out->info()->strides_in_bytes().y() / out->info()->element_size() : 0;
-    const int         output_row_stride   = (setup_strides) ? out->info()->strides_in_bytes().z() / out->info()->element_size() : 0;
-    const int         output_batch_stride = (setup_strides) ? out->info()->strides_in_bytes()[3] / out->info()->element_size() : 0;
-
-    const auto stride_x = conv_info.stride().first;
-    switch(dt)
-    {
-        case DataType::QASYMM8:
-        {
-            switch(stride_x)
-            {
-                case 1:
-                    return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<4, 4, 3, 3, 1, 1, uint8_t, int32_t>>(
-                               n_batches, in_rows, in_cols, n_channels, padding_same,
-                               reinterpret_cast<const uint8_t *>(w->ptr_to_element(Coordinates())),
-                               in->ptr_to_element(Coordinates()),
-                               reinterpret_cast<int32_t *>(out->ptr_to_element(Coordinates())), weight_col_stride,
-                               weight_row_stride, input_col_stride, input_row_stride, input_batch_stride,
-                               output_col_stride, output_row_stride, output_batch_stride);
-                case 2:
-                    return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<4, 4, 3, 3, 2, 2, uint8_t, int32_t>>(
-                               n_batches, in_rows, in_cols, n_channels, padding_same,
-                               reinterpret_cast<const uint8_t *>(w->ptr_to_element(Coordinates())),
-                               in->ptr_to_element(Coordinates()),
-                               reinterpret_cast<int32_t *>(out->ptr_to_element(Coordinates())), weight_col_stride,
-                               weight_row_stride, input_col_stride, input_row_stride, input_batch_stride,
-                               output_col_stride, output_row_stride, output_batch_stride);
-                default:
-                    return nullptr;
-            }
-            break;
-        }
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        case DataType::F16:
-        {
-            switch(stride_x)
-            {
-                case 1:
-                    return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<4, 4, 3, 3, 1, 1, float16_t, float16_t>>(
-                               n_batches, in_rows, in_cols, n_channels, padding_same,
-                               reinterpret_cast<const float16_t *>(w->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float16_t *>(in->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float16_t *>(out->ptr_to_element(Coordinates())), weight_col_stride,
-                               weight_row_stride, input_col_stride, input_row_stride, input_batch_stride,
-                               output_col_stride, output_row_stride, output_batch_stride);
-                case 2:
-                    return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<4, 4, 3, 3, 2, 2, float16_t, float16_t>>(
-                               n_batches, in_rows, in_cols, n_channels, padding_same,
-                               reinterpret_cast<const float16_t *>(w->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float16_t *>(in->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float16_t *>(out->ptr_to_element(Coordinates())), weight_col_stride,
-                               weight_row_stride, input_col_stride, input_row_stride, input_batch_stride,
-                               output_col_stride, output_row_stride, output_batch_stride);
-                default:
-                    return nullptr;
-            }
-            break;
-        }
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        case DataType::F32:
-        {
-            switch(stride_x)
-            {
-                case 1:
-                    return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float>>(
-                               n_batches, in_rows, in_cols, n_channels, padding_same,
-                               reinterpret_cast<const float *>(w->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float *>(in->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float *>(out->ptr_to_element(Coordinates())), weight_col_stride,
-                               weight_row_stride, input_col_stride, input_row_stride, input_batch_stride,
-                               output_col_stride, output_row_stride, output_batch_stride);
-                case 2:
-                    return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float>>(
-                               n_batches, in_rows, in_cols, n_channels, padding_same,
-                               reinterpret_cast<const float *>(w->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float *>(in->ptr_to_element(Coordinates())),
-                               reinterpret_cast<float *>(out->ptr_to_element(Coordinates())), weight_col_stride,
-                               weight_row_stride, input_col_stride, input_row_stride, input_batch_stride,
-                               output_col_stride, output_row_stride, output_batch_stride);
-                default:
-                    return nullptr;
-            }
-            break;
-        }
-        default:
-            return nullptr;
-    }
-}
\ No newline at end of file
+} // namespace arm_compute
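
Note: with the embedded convolver removed, configuration always goes through
the generic window setup above. A minimal usage sketch, assuming
pre-initialised NCHW Tensor objects named input, weights and output (the
names and shapes are illustrative, not part of this patch):

  NEDepthwiseConvolutionLayer3x3Kernel kernel;
  const PadStrideInfo conv_info(1, 1, 1, 1); // stride (1,1), pad (1,1)
  kernel.configure(&input, &weights, &output, conv_info, /* depth_multiplier */ 1U);
  // validate() mirrors configure() without modifying the tensors:
  const Status status = NEDepthwiseConvolutionLayer3x3Kernel::validate(
      input.info(), weights.info(), output.info(), conv_info, 1U);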
diff --git a/src/core/NEON/kernels/convolution/common/padding.cpp b/src/core/NEON/kernels/convolution/common/padding.cpp
new file mode 100644
index 0000000..b50067b
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/padding.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <cstring>
+#include <cstdint>
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/padding.hpp"
+
+namespace padding
+{
+
+template <typename T>
+void copy_and_pad_tile(
+  const unsigned int tile_rows,
+  const unsigned int tile_cols,
+  const unsigned int n_channels,
+  const T* const inptr,
+  const unsigned int in_row_stride,
+  const unsigned int in_col_stride,
+  T* const outptr,
+  const unsigned int out_row_stride,
+  const unsigned int out_col_stride,
+  const unsigned int pad_top,
+  const unsigned int pad_left,
+  const unsigned int pad_bottom,
+  const unsigned int pad_right,
+  const T pad_value
+)
+{
+  for (unsigned int out_i = 0; out_i < tile_rows; out_i++)
+  {
+    for (unsigned int out_j = 0; out_j < tile_cols; out_j++)
+    {
+      T* const output = outptr + out_i*out_row_stride + out_j*out_col_stride;
+
+      if (out_i < pad_top || tile_rows - pad_bottom <= out_i ||
+          out_j < pad_left || tile_cols - pad_right <= out_j)
+      {
+        for (unsigned int n = 0; n < n_channels; n++)
+        {
+          output[n] = pad_value;
+        }
+      }
+      else
+      {
+        const auto in_i = out_i - pad_top, in_j = out_j - pad_left;
+        const T* const input = inptr + in_i*in_row_stride + in_j*in_col_stride;
+        std::memcpy(output, input, n_channels * sizeof(T));
+      }
+    }
+  }
+}
+
+template void copy_and_pad_tile(
+  unsigned int, unsigned int, unsigned int,
+  const uint8_t *, unsigned int, unsigned int,
+  uint8_t *, unsigned int, unsigned int,
+  unsigned int, unsigned int, unsigned int, unsigned int, uint8_t
+);
+
+template void copy_and_pad_tile(
+  unsigned int, unsigned int, unsigned int,
+  const float *, unsigned int, unsigned int,
+  float *, unsigned int, unsigned int,
+  unsigned int, unsigned int, unsigned int, unsigned int, float
+);
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template void copy_and_pad_tile(
+  unsigned int, unsigned int, unsigned int,
+  const float16_t *, unsigned int, unsigned int,
+  float16_t *, unsigned int, unsigned int,
+  unsigned int, unsigned int, unsigned int, unsigned int, float16_t
+);
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+template <unsigned int TileRows, unsigned int TileCols>
+void CopyCropped<TileRows, TileCols>::execute(
+  const size_t size,
+  const void * const inptr,
+  const size_t in_row_stride,
+  const size_t in_col_stride,
+  void * const outptr,
+  const size_t out_row_stride,
+  const size_t out_col_stride,
+  const unsigned int pad_top,
+  const unsigned int pad_left,
+  const unsigned int pad_bottom,
+  const unsigned int pad_right
+)
+{
+  for (unsigned int out_i = 0, in_i = pad_top; in_i < TileRows - pad_bottom; out_i++, in_i++)
+  {
+    for (unsigned int out_j = 0, in_j = pad_left; in_j < TileCols - pad_right; out_j++, in_j++)
+    {
+      std::memcpy(
+        static_cast<uint8_t *>(outptr) + out_i*out_row_stride + out_j*out_col_stride,
+        static_cast<const uint8_t *>(inptr) + in_i*in_row_stride + in_j*in_col_stride,
+        size
+      );
+    }
+  }
+}
+
+template class CopyCropped<2, 2>;
+template class CopyCropped<3, 3>;
+template class CopyCropped<4, 4>;
+
+}  // namespace padding
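
Note: a minimal sketch of how the copy_and_pad_tile helper above behaves,
assuming a contiguous single-channel 2x2 float input padded with one element
of zeros on every side (the data values are illustrative only):

  #include "arm_compute/core/NEON/kernels/convolution/common/padding.hpp"

  const float in[2 * 2] = { 1.f, 2.f, 3.f, 4.f };
  float out[4 * 4];
  padding::copy_and_pad_tile(
    4, 4, 1,     // output tile: 4 rows, 4 cols, 1 channel
    in, 2, 1,    // input pointer, row/col strides (in elements)
    out, 4, 1,   // output pointer, row/col strides (in elements)
    1, 1, 1, 1,  // pad top, left, bottom, right
    0.f);        // value written into the padded border
  // out now holds a zero border around the 2x2 block:
  //   0 0 0 0
  //   0 1 2 0
  //   0 3 4 0
  //   0 0 0 0

CopyCropped performs the inverse operation, copying only the unpadded
interior of a tile into a compact output.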
diff --git a/src/core/NEON/kernels/convolution/common/qasymm8.cpp b/src/core/NEON/kernels/convolution/common/qasymm8.cpp
new file mode 100644
index 0000000..1de9ebf
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/qasymm8.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cmath>
+#include <limits>
+
+#include "arm_compute/core/NEON/kernels/convolution/common/qasymm8.hpp"
+
+namespace qasymm8
+{
+#if(__ANDROID__ || BARE_METAL)
+template <typename T> T round(T val) { return ::round(val); }
+template <typename T> T exp2(T val) { return ::exp2(val); }
+template <typename T> T log2(T val) { return ::log2(val); }
+#else  /* (__ANDROID__ || BARE_METAL) */
+template <typename T> T round(T val) { return std::round(val); }
+template <typename T> T exp2(T val) { return std::exp2(val); }
+template <typename T> T log2(T val) { return std::log2(val); }
+#endif  /* (__ANDROID__ || BARE_METAL) */
+
+uint8_t QAsymm8Params::quantize(const float value) const
+{
+  const float transformed = value / scale + offset;
+  return static_cast<uint8_t>(round(std::max(0.0f, std::min(255.0f, transformed))));
+}
+
+float QAsymm8Params::dequantize(const uint8_t value) const
+{
+  return scale * (static_cast<float>(value) - offset);
+}
+
+QAsymm8RescaleParams QAsymm8RescaleParams::make_rescale_params(
+  const QAsymm8Params& weight_quant,
+  const QAsymm8Params& input_quant,
+  const QAsymm8Params& output_quant
+)
+{
+  // Based on the gemmlowp approach: https://github.com/google/gemmlowp/blob/master/doc/quantization_example.cc
+  const float rescale = weight_quant.scale * input_quant.scale / output_quant.scale;
+  const float shiftf = round(log2(0.5f / rescale));
+  const float multf = exp2(31.0f + shiftf)*rescale;
+
+  int64_t shift = static_cast<int64_t>(shiftf);
+  int64_t mult = static_cast<int64_t>(multf);
+
+  if (mult == (1ll << 31))
+  {
+    mult /= 2;
+    shift--;
+  }
+
+  assert(shift >= 0);
+  assert(mult <= std::numeric_limits<int32_t>::max());
+
+  return QAsymm8RescaleParams(
+    static_cast<int32_t>(shift),
+    static_cast<int32_t>(mult),
+    rescale
+  );
+}
+
+QAsymm8RescaleParams::QAsymm8RescaleParams(int32_t shift, int32_t multi, float rescale)
+  : shift(shift), multiplier(multi), rescale(rescale)
+{
+}
+}  // namespace qasymm8
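
Note: make_rescale_params above follows the gemmlowp recipe, folding the
weight, input and output scales into a single real rescale factor and
approximating it with a 32-bit multiplier plus a right shift. A hedged
sketch of how such a pair could be applied to a 32-bit accumulator;
apply_rescale is a hypothetical helper and its rounding scheme is
illustrative, not necessarily what the production kernels do:

  #include <cstdint>

  // acc * rescale  ~=  (acc * multiplier) / 2^(31 + shift)
  int32_t apply_rescale(int32_t acc, int32_t multiplier, int32_t shift)
  {
    const int64_t prod     = static_cast<int64_t>(acc) * multiplier;
    const int64_t rounding = int64_t{1} << (30 + shift); // round to nearest
    return static_cast<int32_t>((prod + rounding) >> (31 + shift));
  }

  // Example: scales 0.02 (weights), 0.5 (input), 0.1 (output) give
  // rescale = 0.1, shift = round(log2(0.5 / 0.1)) = 2 and
  // multiplier = int64(2^33 * 0.1) = 858993459, so
  // apply_rescale(1000, 858993459, 2) == 100, i.e. 1000 * 0.1.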
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp
index ca1de26..1272754 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,546 +25,1144 @@
 
 namespace depthwise
 {
-using Conv = DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<2, 2, 3, 3, 1, 1, float, float>;
+
+using namespace neon_convolution_kernels;
+using Conv = DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float, float>;
 
 #ifdef __aarch64__
-
 template <>
 template <>
-void ConvImpl::process_tile<true, 0, 0, 0, 0, 0, 0>(
-  const int n_channels,
-  const float* const weights,
-  const int weight_row_stride,
-  const int weight_col_stride,
-  const float* const inptr,
-  const int in_row_stride,
-  const int in_col_stride,
-  float* const outptr,
-  const int out_row_stride,
-  const int out_col_stride,
-  const int, const int, const int, const int, const int, const int, const int, const int
+void Conv::execute_tile<ActivationFunction::None>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
 )
 {
-  // Copy pointers
-  const float *uptr0 = inptr;
-  const float *wptr0 = weights;
-  float *vptr0 = outptr;
+  __asm __volatile(
+    "add x26, %[inptr0], %[input_row_stride]\n"
+    "add x21, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x23, %[outptr0], %[output_row_stride]\n"
+    "add x27, x26, %[input_row_stride]\n"
+    "add x22, x21, %[input_col_stride1]\n"
+    "and x24, %[n_channels], #3\n"
+    "add x28, x27, %[input_row_stride]\n"
+    "lsr x25, %[n_channels], #2\n"
+    "cbz x25, 4f\n"
+    "1:\n"
+    "ldr q15, [%[wbptr]]\n"
+    "subs x25, x25, #1\n"
+    "mov v3.16b, v15.16b\n"
+    "ldr q14, [%[wbptr], #16]\n"
+    "mov v1.16b, v15.16b\n"
+    "ldr q13, [%[wbptr], #32]\n"
+    "mov v2.16b, v15.16b\n"
+    "ldr q12, [%[wbptr], #48]\n"
+    "mov v0.16b, v15.16b\n"
+    "ldr q11, [%[wbptr], #64]\n"
+    "ldr q10, [%[wbptr], #80]\n"
+    "ldr q9, [%[wbptr], #96]\n"
+    "ldr q8, [%[wbptr], #112]\n"
+    "ldr q7, [%[wbptr], #128]\n"
+    "ldr q6, [%[wbptr], #144]\n"
+    "ldr q24, [%[inptr0]]\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "ldr q22, [x26]\n"
+    "fmla v1.4s, v22.4s, v14.4s\n"
+    "ldr q19, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v19.4s, v14.4s\n"
+    "ldr q18, [x27]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "ldr q21, [x26, %[input_col_stride1]]\n"
+    "fmla v1.4s, v18.4s, v11.4s\n"
+    "ldr q17, [%[inptr0], x21]\n"
+    "ldr q20, [x28]\n"
+    "ldr q5, [x27, %[input_col_stride1]]\n"
+    "fmla v3.4s, v19.4s, v13.4s\n"
+    "fmla v3.4s, v18.4s, v8.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v3.4s, v21.4s, v10.4s\n"
+    "ldr q19, [x26, x21]\n"
+    "fmla v1.4s, v21.4s, v13.4s\n"
+    "ldr q23, [%[inptr0], x22]\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "ldr q22, [x28, %[input_col_stride1]]\n"
+    "fmla v0.4s, v21.4s, v14.4s\n"
+    "ldr q21, [x27, x21]\n"
+    "fmla v3.4s, v17.4s, v12.4s\n"
+    "ldr q18, [x26, x22]\n"
+    "fmla v2.4s, v17.4s, v13.4s\n"
+    "ldr q16, [x28, x21]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "ldr q20, [x27, x22]\n"
+    "fmla v3.4s, v5.4s, v7.4s\n"
+    "ldr q4, [x28, x22]\n"
+    "fmla v2.4s, v5.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v1.4s, v5.4s, v10.4s\n"
+    "ldr q15, [%[wbptr]]\n"
+    "fmla v0.4s, v5.4s, v11.4s\n"
+    "ldr q14, [%[wbptr], #16]\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v1.4s, v19.4s, v12.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v2.4s, v19.4s, v10.4s\n"
+    "ldr q11, [%[wbptr], #64]\n"
+    "fmla v0.4s, v19.4s, v13.4s\n"
+    "ldr q24, [%[inptr0]]\n"
+    "fmla v1.4s, v22.4s, v7.4s\n"
+    "ldr q19, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "ldr q17, [%[inptr0], x21]\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "ldr q13, [%[wbptr], #32]\n"
+    "fmla v3.4s, v21.4s, v6.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v1.4s, v21.4s, v9.4s\n"
+    "ldr q22, [x26]\n"
+    "fmla v2.4s, v21.4s, v7.4s\n"
+    "ldr q8, [%[wbptr], #112]\n"
+    "str q3, [%[outptr0]]\n"
+    "fmla v0.4s, v21.4s, v10.4s\n"
+    "fmla v1.4s, v16.4s, v6.4s\n"
+    "ldr q21, [x26, %[input_col_stride1]]\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v0.4s, v18.4s, v12.4s\n"
+    "ldr q10, [%[wbptr], #80]\n"
+    "str q1, [x23]\n"
+    "mov v3.16b, v15.16b\n"
+    "fmla v2.4s, v20.4s, v6.4s\n"
+    "ldr q18, [x27]\n"
+    "fmla v0.4s, v16.4s, v7.4s\n"
+    "ldr q12, [%[wbptr], #48]\n"
+    "mov v1.16b, v15.16b\n"
+    "ldr q5, [x27, %[input_col_stride1]]\n"
+    "str q2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "fmla v0.4s, v20.4s, v9.4s\n"
+    "ldr q7, [%[wbptr], #128]\n"
+    "mov v2.16b, v15.16b\n"
+    "add x28, x28, #16\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "ldr q20, [x28]\n"
+    "fmla v0.4s, v4.4s, v6.4s\n"
+    "ldr q9, [%[wbptr], #96]\n"
+    "fmla v1.4s, v22.4s, v14.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v3.4s, v19.4s, v13.4s\n"
+    "subs x25, x25, #1\n"
+    "str q0, [x23, %[output_col_stride1]]\n"
+    "fmla v2.4s, v19.4s, v14.4s\n"
+    "ldr q6, [%[wbptr], #144]\n"
+    "add x23, x23, #16\n"
+    "fmla v3.4s, v18.4s, v8.4s\n"
+    "fmla v1.4s, v18.4s, v11.4s\n"
+    "mov v0.16b, v15.16b\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v3.4s, v21.4s, v10.4s\n"
+    "ldr q19, [x26, x21]\n"
+    "fmla v1.4s, v21.4s, v13.4s\n"
+    "ldr q23, [%[inptr0], x22]\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "ldr q22, [x28, %[input_col_stride1]]\n"
+    "fmla v0.4s, v21.4s, v14.4s\n"
+    "ldr q21, [x27, x21]\n"
+    "fmla v3.4s, v17.4s, v12.4s\n"
+    "ldr q18, [x26, x22]\n"
+    "fmla v2.4s, v17.4s, v13.4s\n"
+    "ldr q16, [x28, x21]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "ldr q20, [x27, x22]\n"
+    "fmla v3.4s, v5.4s, v7.4s\n"
+    "ldr q4, [x28, x22]\n"
+    "fmla v2.4s, v5.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v1.4s, v5.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v5.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v1.4s, v19.4s, v12.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v2.4s, v19.4s, v10.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v0.4s, v19.4s, v13.4s\n"
+    "fmla v3.4s, v21.4s, v6.4s\n"
+    "fmla v1.4s, v22.4s, v7.4s\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "str q3, [%[outptr0]]\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "fmla v1.4s, v21.4s, v9.4s\n"
+    "fmla v2.4s, v21.4s, v7.4s\n"
+    "fmla v0.4s, v21.4s, v10.4s\n"
+    "fmla v1.4s, v16.4s, v6.4s\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "fmla v0.4s, v18.4s, v12.4s\n"
+    "str q1, [x23]\n"
+    "fmla v2.4s, v20.4s, v6.4s\n"
+    "fmla v0.4s, v16.4s, v7.4s\n"
+    "str q2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v20.4s, v9.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v0.4s, v4.4s, v6.4s\n"
+    "str q0, [x23, %[output_col_stride1]]\n"
+    "add x23, x23, #16\n"
+    "4:\n"
+    "cbz x24, 7f\n"
+    "ldr s15, [%[wbptr]]\n"
+    "mov v3.16b, v15.16b\n"
+    "ldr s14, [%[wbptr], #4]\n"
+    "mov v1.16b, v15.16b\n"
+    "ldr s13, [%[wbptr], #8]\n"
+    "mov v2.16b, v15.16b\n"
+    "ldr s12, [%[wbptr], #12]\n"
+    "mov v0.16b, v15.16b\n"
+    "ldr s11, [%[wbptr], #16]\n"
+    "ldr s10, [%[wbptr], #20]\n"
+    "subs x24, x24, #1\n"
+    "ldr s9, [%[wbptr], #24]\n"
+    "ldr s8, [%[wbptr], #28]\n"
+    "ldr s7, [%[wbptr], #32]\n"
+    "ldr s6, [%[wbptr], #36]\n"
+    "ldr s24, [%[inptr0]]\n"
+    "ldr s22, [x26]\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "ldr s19, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v1.4s, v22.4s, v14.4s\n"
+    "ldr s18, [x27]\n"
+    "fmla v2.4s, v19.4s, v14.4s\n"
+    "ldr s21, [x26, %[input_col_stride1]]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "ldr s17, [%[inptr0], x21]\n"
+    "fmla v1.4s, v18.4s, v11.4s\n"
+    "ldr s20, [x28]\n"
+    "ldr s5, [x27, %[input_col_stride1]]\n"
+    "fmla v3.4s, v19.4s, v13.4s\n"
+    "fmla v3.4s, v18.4s, v8.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v3.4s, v21.4s, v10.4s\n"
+    "ldr s19, [x26, x21]\n"
+    "fmla v1.4s, v21.4s, v13.4s\n"
+    "ldr s23, [%[inptr0], x22]\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "ldr s22, [x28, %[input_col_stride1]]\n"
+    "fmla v0.4s, v21.4s, v14.4s\n"
+    "ldr s21, [x27, x21]\n"
+    "fmla v3.4s, v17.4s, v12.4s\n"
+    "ldr s18, [x26, x22]\n"
+    "fmla v2.4s, v17.4s, v13.4s\n"
+    "ldr s16, [x28, x21]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "ldr s20, [x27, x22]\n"
+    "fmla v3.4s, v5.4s, v7.4s\n"
+    "ldr s4, [x28, x22]\n"
+    "fmla v2.4s, v5.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v1.4s, v5.4s, v10.4s\n"
+    "ldr s15, [%[wbptr]]\n"
+    "fmla v0.4s, v5.4s, v11.4s\n"
+    "ldr s14, [%[wbptr], #4]\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v1.4s, v19.4s, v12.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v2.4s, v19.4s, v10.4s\n"
+    "ldr s11, [%[wbptr], #16]\n"
+    "fmla v0.4s, v19.4s, v13.4s\n"
+    "ldr s24, [%[inptr0]]\n"
+    "fmla v1.4s, v22.4s, v7.4s\n"
+    "ldr s19, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "ldr s17, [%[inptr0], x21]\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "ldr s13, [%[wbptr], #8]\n"
+    "fmla v3.4s, v21.4s, v6.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v1.4s, v21.4s, v9.4s\n"
+    "ldr s22, [x26]\n"
+    "fmla v2.4s, v21.4s, v7.4s\n"
+    "ldr s8, [%[wbptr], #28]\n"
+    "str s3, [%[outptr0]]\n"
+    "fmla v0.4s, v21.4s, v10.4s\n"
+    "fmla v1.4s, v16.4s, v6.4s\n"
+    "ldr s21, [x26, %[input_col_stride1]]\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v0.4s, v18.4s, v12.4s\n"
+    "ldr s10, [%[wbptr], #20]\n"
+    "str s1, [x23]\n"
+    "mov v3.16b, v15.16b\n"
+    "fmla v2.4s, v20.4s, v6.4s\n"
+    "ldr s18, [x27]\n"
+    "fmla v0.4s, v16.4s, v7.4s\n"
+    "ldr s12, [%[wbptr], #12]\n"
+    "mov v1.16b, v15.16b\n"
+    "ldr s5, [x27, %[input_col_stride1]]\n"
+    "str s2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "fmla v0.4s, v20.4s, v9.4s\n"
+    "ldr s7, [%[wbptr], #32]\n"
+    "mov v2.16b, v15.16b\n"
+    "add x28, x28, #4\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "ldr s20, [x28]\n"
+    "fmla v0.4s, v4.4s, v6.4s\n"
+    "ldr s9, [%[wbptr], #24]\n"
+    "fmla v1.4s, v22.4s, v14.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v3.4s, v19.4s, v13.4s\n"
+    "subs x24, x24, #1\n"
+    "str s0, [x23, %[output_col_stride1]]\n"
+    "fmla v2.4s, v19.4s, v14.4s\n"
+    "ldr s6, [%[wbptr], #36]\n"
+    "add x23, x23, #4\n"
+    "fmla v3.4s, v18.4s, v8.4s\n"
+    "fmla v1.4s, v18.4s, v11.4s\n"
+    "mov v0.16b, v15.16b\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v3.4s, v21.4s, v10.4s\n"
+    "ldr s19, [x26, x21]\n"
+    "fmla v1.4s, v21.4s, v13.4s\n"
+    "ldr s23, [%[inptr0], x22]\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "ldr s22, [x28, %[input_col_stride1]]\n"
+    "fmla v0.4s, v21.4s, v14.4s\n"
+    "ldr s21, [x27, x21]\n"
+    "fmla v3.4s, v17.4s, v12.4s\n"
+    "ldr s18, [x26, x22]\n"
+    "fmla v2.4s, v17.4s, v13.4s\n"
+    "ldr s16, [x28, x21]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "ldr s20, [x27, x22]\n"
+    "fmla v3.4s, v5.4s, v7.4s\n"
+    "ldr s4, [x28, x22]\n"
+    "fmla v2.4s, v5.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v1.4s, v5.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v5.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v3.4s, v19.4s, v9.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v1.4s, v19.4s, v12.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v2.4s, v19.4s, v10.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v0.4s, v19.4s, v13.4s\n"
+    "fmla v3.4s, v21.4s, v6.4s\n"
+    "fmla v1.4s, v22.4s, v7.4s\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "str s3, [%[outptr0]]\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "fmla v1.4s, v21.4s, v9.4s\n"
+    "fmla v2.4s, v21.4s, v7.4s\n"
+    "fmla v0.4s, v21.4s, v10.4s\n"
+    "fmla v1.4s, v16.4s, v6.4s\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "fmla v0.4s, v18.4s, v12.4s\n"
+    "str s1, [x23]\n"
+    "fmla v2.4s, v20.4s, v6.4s\n"
+    "fmla v0.4s, v16.4s, v7.4s\n"
+    "str s2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v20.4s, v9.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v0.4s, v4.4s, v6.4s\n"
+    "str s0, [x23, %[output_col_stride1]]\n"
+    "add x23, x23, #4\n"
+    "7:\n"
+    : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
-  int channels_remaining = n_channels;
-  if (channels_remaining >= 4)
-  {
-    // Process blocks of 4 channels at a time
-    int n_iters = ((channels_remaining / 4) + 1)/2 - 1;
-    const bool odd_tail = (channels_remaining / 4) & 1;
-    channels_remaining %= 4;
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
+  __asm __volatile(
+    "add x21, %[inptr0], %[input_row_stride]\n"
+    "add x24, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x22, %[outptr0], %[output_row_stride]\n"
+    "add x23, x21, %[input_row_stride]\n"
+    "add x27, x24, %[input_col_stride1]\n"
+    "and x25, %[n_channels], #3\n"
+    "add x28, x23, %[input_row_stride]\n"
+    "lsr x26, %[n_channels], #2\n"
+    "cbz x26, 4f\n"
+    "1:\n"
+    "ldr q11, [%[wbptr]]\n"
+    "subs x26, x26, #1\n"
+    "mov v17.16b, v11.16b\n"
+    "ldr q13, [%[wbptr], #16]\n"
+    "mov v15.16b, v11.16b\n"
+    "ldr q4, [%[wbptr], #32]\n"
+    "mov v16.16b, v11.16b\n"
+    "ldr q2, [%[wbptr], #48]\n"
+    "mov v14.16b, v11.16b\n"
+    "ldr q5, [%[wbptr], #64]\n"
+    "ldr q10, [%[wbptr], #80]\n"
+    "ldr q1, [%[wbptr], #96]\n"
+    "ldr q12, [%[wbptr], #112]\n"
+    "ldr q0, [%[wbptr], #128]\n"
+    "ldr q3, [%[wbptr], #144]\n"
+    "ldr q6, [%[inptr0]]\n"
+    "fmla v17.4s, v6.4s, v13.4s\n"
+    "ldr q27, [x21]\n"
+    "fmla v15.4s, v27.4s, v13.4s\n"
+    "ldr q23, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "ldr q24, [x23]\n"
+    "fmla v17.4s, v27.4s, v5.4s\n"
+    "ldr q22, [x21, %[input_col_stride1]]\n"
+    "ldr q9, [%[inptr0], x24]\n"
+    "ldr q8, [x28]\n"
+    "ldr q20, [x23, %[input_col_stride1]]\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v17.4s, v24.4s, v12.4s\n"
+    "ldr q26, [x21, x24]\n"
+    "fmla v15.4s, v24.4s, v5.4s\n"
+    "ldr q27, [%[inptr0], x27]\n"
+    "fmla v16.4s, v22.4s, v5.4s\n"
+    "ldr q25, [x28, %[input_col_stride1]]\n"
+    "fmla v17.4s, v22.4s, v10.4s\n"
+    "ldr q24, [x23, x24]\n"
+    "fmla v15.4s, v22.4s, v4.4s\n"
+    "ldr q21, [x21, x27]\n"
+    "fmla v14.4s, v22.4s, v13.4s\n"
+    "ldr q7, [x28, x24]\n"
+    "fmla v17.4s, v9.4s, v2.4s\n"
+    "ldr q19, [x23, x27]\n"
+    "fmla v16.4s, v9.4s, v4.4s\n"
+    "ldr q18, [x28, x27]\n"
+    "fmla v15.4s, v8.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v17.4s, v20.4s, v0.4s\n"
+    "ldr q11, [%[wbptr]]\n"
+    "fmla v16.4s, v20.4s, v12.4s\n"
+    "ldr q13, [%[wbptr], #16]\n"
+    "fmla v15.4s, v20.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v14.4s, v20.4s, v5.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v17.4s, v26.4s, v1.4s\n"
+    "ldr q6, [%[inptr0]]\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "ldr q23, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "ldr q5, [%[wbptr], #64]\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "ldr q9, [%[inptr0], x24]\n"
+    "fmla v15.4s, v25.4s, v0.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v16.4s, v27.4s, v2.4s\n"
+    "ldr q27, [x21]\n"
+    "fmla v14.4s, v25.4s, v12.4s\n"
+    "ldr q4, [%[wbptr], #32]\n"
+    "fmla v17.4s, v24.4s, v3.4s\n"
+    "ldr q22, [x21, %[input_col_stride1]]\n"
+    "fmla v15.4s, v24.4s, v1.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v16.4s, v24.4s, v0.4s\n"
+    "ldr q12, [%[wbptr], #112]\n"
+    "fmla v14.4s, v24.4s, v10.4s\n"
+    "ldr q24, [x23]\n"
+    "fmla v15.4s, v7.4s, v3.4s\n"
+    "ldr q20, [x23, %[input_col_stride1]]\n"
+    "fmla v16.4s, v21.4s, v1.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v14.4s, v21.4s, v2.4s\n"
+    "ldr q10, [%[wbptr], #80]\n"
+    "movi v26.16b, #0\n"
+    "ldr q8, [x28]\n"
+    "fmla v16.4s, v19.4s, v3.4s\n"
+    "subs x26, x26, #1\n"
+    "fmla v14.4s, v7.4s, v0.4s\n"
+    "ldr q2, [%[wbptr], #48]\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "str q17, [%[outptr0]]\n"
+    "str q16, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v14.4s, v19.4s, v1.4s\n"
+    "str q15, [x22]\n"
+    "mov v17.16b, v11.16b\n"
+    "mov v15.16b, v11.16b\n"
+    "ldr q0, [%[wbptr], #128]\n"
+    "fmla v14.4s, v18.4s, v3.4s\n"
+    "ldr q1, [%[wbptr], #96]\n"
+    "mov v16.16b, v11.16b\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v17.4s, v6.4s, v13.4s\n"
+    "fmla v15.4s, v27.4s, v13.4s\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "ldr q3, [%[wbptr], #144]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "str q14, [x22, %[output_col_stride1]]\n"
+    "mov v14.16b, v11.16b\n"
+    "add x22, x22, #16\n"
+    "fmla v17.4s, v27.4s, v5.4s\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v17.4s, v24.4s, v12.4s\n"
+    "ldr q26, [x21, x24]\n"
+    "fmla v15.4s, v24.4s, v5.4s\n"
+    "ldr q27, [%[inptr0], x27]\n"
+    "fmla v16.4s, v22.4s, v5.4s\n"
+    "ldr q25, [x28, %[input_col_stride1]]\n"
+    "fmla v17.4s, v22.4s, v10.4s\n"
+    "ldr q24, [x23, x24]\n"
+    "fmla v15.4s, v22.4s, v4.4s\n"
+    "ldr q21, [x21, x27]\n"
+    "fmla v14.4s, v22.4s, v13.4s\n"
+    "ldr q7, [x28, x24]\n"
+    "fmla v17.4s, v9.4s, v2.4s\n"
+    "ldr q19, [x23, x27]\n"
+    "fmla v16.4s, v9.4s, v4.4s\n"
+    "ldr q18, [x28, x27]\n"
+    "fmla v15.4s, v8.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v17.4s, v20.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v16.4s, v20.4s, v12.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v15.4s, v20.4s, v10.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v14.4s, v20.4s, v5.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v17.4s, v26.4s, v1.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "movi v26.16b, #0\n"
+    "fmla v17.4s, v24.4s, v3.4s\n"
+    "fmla v16.4s, v27.4s, v2.4s\n"
+    "fmla v15.4s, v25.4s, v0.4s\n"
+    "fmla v14.4s, v25.4s, v12.4s\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "fmla v16.4s, v24.4s, v0.4s\n"
+    "str q17, [%[outptr0]]\n"
+    "fmla v15.4s, v24.4s, v1.4s\n"
+    "fmla v14.4s, v24.4s, v10.4s\n"
+    "fmla v16.4s, v21.4s, v1.4s\n"
+    "fmla v15.4s, v7.4s, v3.4s\n"
+    "fmla v14.4s, v21.4s, v2.4s\n"
+    "fmla v16.4s, v19.4s, v3.4s\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "fmla v14.4s, v7.4s, v0.4s\n"
+    "str q15, [x22]\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "fmla v14.4s, v19.4s, v1.4s\n"
+    "str q16, [%[outptr0], %[output_col_stride1]]\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v14.4s, v18.4s, v3.4s\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "str q14, [x22, %[output_col_stride1]]\n"
+    "add x22, x22, #16\n"
+    "4:\n"
+    "cbz x25, 7f\n"
+    "ldr s11, [%[wbptr]]\n"
+    "mov v17.16b, v11.16b\n"
+    "ldr s13, [%[wbptr], #4]\n"
+    "mov v15.16b, v11.16b\n"
+    "ldr s4, [%[wbptr], #8]\n"
+    "mov v16.16b, v11.16b\n"
+    "ldr s2, [%[wbptr], #12]\n"
+    "mov v14.16b, v11.16b\n"
+    "ldr s5, [%[wbptr], #16]\n"
+    "ldr s10, [%[wbptr], #20]\n"
+    "subs x25, x25, #1\n"
+    "ldr s1, [%[wbptr], #24]\n"
+    "ldr s12, [%[wbptr], #28]\n"
+    "ldr s0, [%[wbptr], #32]\n"
+    "ldr s3, [%[wbptr], #36]\n"
+    "ldr s6, [%[inptr0]]\n"
+    "ldr s27, [x21]\n"
+    "fmla v17.4s, v6.4s, v13.4s\n"
+    "ldr s23, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v15.4s, v27.4s, v13.4s\n"
+    "ldr s24, [x23]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "ldr s22, [x21, %[input_col_stride1]]\n"
+    "fmla v17.4s, v27.4s, v5.4s\n"
+    "ldr s9, [%[inptr0], x24]\n"
+    "ldr s8, [x28]\n"
+    "ldr s20, [x23, %[input_col_stride1]]\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v17.4s, v24.4s, v12.4s\n"
+    "ldr s26, [x21, x24]\n"
+    "fmla v15.4s, v24.4s, v5.4s\n"
+    "ldr s27, [%[inptr0], x27]\n"
+    "fmla v16.4s, v22.4s, v5.4s\n"
+    "ldr s25, [x28, %[input_col_stride1]]\n"
+    "fmla v17.4s, v22.4s, v10.4s\n"
+    "ldr s24, [x23, x24]\n"
+    "fmla v15.4s, v22.4s, v4.4s\n"
+    "ldr s21, [x21, x27]\n"
+    "fmla v14.4s, v22.4s, v13.4s\n"
+    "ldr s7, [x28, x24]\n"
+    "fmla v17.4s, v9.4s, v2.4s\n"
+    "ldr s19, [x23, x27]\n"
+    "fmla v16.4s, v9.4s, v4.4s\n"
+    "ldr s18, [x28, x27]\n"
+    "fmla v15.4s, v8.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v17.4s, v20.4s, v0.4s\n"
+    "ldr s11, [%[wbptr]]\n"
+    "fmla v16.4s, v20.4s, v12.4s\n"
+    "ldr s13, [%[wbptr], #4]\n"
+    "fmla v15.4s, v20.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v14.4s, v20.4s, v5.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v17.4s, v26.4s, v1.4s\n"
+    "ldr s6, [%[inptr0]]\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "ldr s23, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "ldr s5, [%[wbptr], #16]\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "ldr s9, [%[inptr0], x24]\n"
+    "fmla v15.4s, v25.4s, v0.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v16.4s, v27.4s, v2.4s\n"
+    "ldr s27, [x21]\n"
+    "fmla v14.4s, v25.4s, v12.4s\n"
+    "ldr s4, [%[wbptr], #8]\n"
+    "fmla v17.4s, v24.4s, v3.4s\n"
+    "ldr s22, [x21, %[input_col_stride1]]\n"
+    "fmla v15.4s, v24.4s, v1.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v16.4s, v24.4s, v0.4s\n"
+    "ldr s12, [%[wbptr], #28]\n"
+    "fmla v14.4s, v24.4s, v10.4s\n"
+    "ldr s24, [x23]\n"
+    "fmla v15.4s, v7.4s, v3.4s\n"
+    "ldr s20, [x23, %[input_col_stride1]]\n"
+    "fmla v16.4s, v21.4s, v1.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v14.4s, v21.4s, v2.4s\n"
+    "ldr s10, [%[wbptr], #20]\n"
+    "movi v26.16b, #0\n"
+    "ldr s8, [x28]\n"
+    "fmla v16.4s, v19.4s, v3.4s\n"
+    "subs x25, x25, #1\n"
+    "fmla v14.4s, v7.4s, v0.4s\n"
+    "ldr s2, [%[wbptr], #12]\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "str s17, [%[outptr0]]\n"
+    "str s16, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v14.4s, v19.4s, v1.4s\n"
+    "str s15, [x22]\n"
+    "mov v17.16b, v11.16b\n"
+    "mov v15.16b, v11.16b\n"
+    "ldr s0, [%[wbptr], #32]\n"
+    "fmla v14.4s, v18.4s, v3.4s\n"
+    "ldr s1, [%[wbptr], #24]\n"
+    "mov v16.16b, v11.16b\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v17.4s, v6.4s, v13.4s\n"
+    "fmla v15.4s, v27.4s, v13.4s\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "ldr s3, [%[wbptr], #36]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "str s14, [x22, %[output_col_stride1]]\n"
+    "mov v14.16b, v11.16b\n"
+    "add x22, x22, #4\n"
+    "fmla v17.4s, v27.4s, v5.4s\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v17.4s, v24.4s, v12.4s\n"
+    "ldr s26, [x21, x24]\n"
+    "fmla v15.4s, v24.4s, v5.4s\n"
+    "ldr s27, [%[inptr0], x27]\n"
+    "fmla v16.4s, v22.4s, v5.4s\n"
+    "ldr s25, [x28, %[input_col_stride1]]\n"
+    "fmla v17.4s, v22.4s, v10.4s\n"
+    "ldr s24, [x23, x24]\n"
+    "fmla v15.4s, v22.4s, v4.4s\n"
+    "ldr s21, [x21, x27]\n"
+    "fmla v14.4s, v22.4s, v13.4s\n"
+    "ldr s7, [x28, x24]\n"
+    "fmla v17.4s, v9.4s, v2.4s\n"
+    "ldr s19, [x23, x27]\n"
+    "fmla v16.4s, v9.4s, v4.4s\n"
+    "ldr s18, [x28, x27]\n"
+    "fmla v15.4s, v8.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v17.4s, v20.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v16.4s, v20.4s, v12.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v15.4s, v20.4s, v10.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v14.4s, v20.4s, v5.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v17.4s, v26.4s, v1.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "movi v26.16b, #0\n"
+    "fmla v17.4s, v24.4s, v3.4s\n"
+    "fmla v16.4s, v27.4s, v2.4s\n"
+    "fmla v15.4s, v25.4s, v0.4s\n"
+    "fmla v14.4s, v25.4s, v12.4s\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "fmla v16.4s, v24.4s, v0.4s\n"
+    "str s17, [%[outptr0]]\n"
+    "fmla v15.4s, v24.4s, v1.4s\n"
+    "fmla v14.4s, v24.4s, v10.4s\n"
+    "fmla v16.4s, v21.4s, v1.4s\n"
+    "fmla v15.4s, v7.4s, v3.4s\n"
+    "fmla v14.4s, v21.4s, v2.4s\n"
+    "fmla v16.4s, v19.4s, v3.4s\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "fmla v14.4s, v7.4s, v0.4s\n"
+    "str s15, [x22]\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "fmla v14.4s, v19.4s, v1.4s\n"
+    "str s16, [%[outptr0], %[output_col_stride1]]\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v14.4s, v18.4s, v3.4s\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "str s14, [x22, %[output_col_stride1]]\n"
+    "add x22, x22, #4\n"
+    "7:\n"
+    : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr)
+    : [n_channels] "r" ((long) n_channels), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
-    asm volatile (
-      "qW11B .req q0\n" "vW11B .req v0\n" "qW33A .req q1\n" "qU32B .req q1\n"
-      "vW33A .req v1\n" "vU32B .req v1\n" "qU44B .req q2\n" "qW21A .req q2\n"
-      "vU44B .req v2\n" "vW21A .req v2\n" "qU21B .req q3\n" "qU32A .req q3\n"
-      "vU21B .req v3\n" "vU32A .req v3\n" "qU43A .req q4\n" "qV21B .req q4\n"
-      "vU43A .req v4\n" "vV21B .req v4\n" "qU24A .req q5\n" "qU44A .req q5\n"
-      "qU33B .req q5\n" "vU24A .req v5\n" "vU44A .req v5\n" "vU33B .req v5\n"
-      "qU31A .req q6\n" "qV12B .req q6\n" "qU23A .req q6\n" "vU31A .req v6\n"
-      "vV12B .req v6\n" "vU23A .req v6\n" "qW31B .req q7\n" "qV22A .req q7\n"
-      "vW31B .req v7\n" "vV22A .req v7\n" "qV12A .req q8\n" "qW21B .req q8\n"
-      "vV12A .req v8\n" "vW21B .req v8\n" "qU22B .req q9\n" "qU34A .req q9\n"
-      "vU22B .req v9\n" "vU34A .req v9\n" "qU13B .req q10\n" "qU13A .req q10\n"
-      "vU13B .req v10\n" "vU13A .req v10\n" "qU34B .req q11\n" "qU22A .req q11\n"
-      "vU34B .req v11\n" "vU22A .req v11\n" "qU24B .req q12\n" "qU31B .req q12\n"
-      "vU24B .req v12\n" "vU31B .req v12\n" "qW12B .req q13\n" "qW13A .req q13\n"
-      "vW12B .req v13\n" "vW13A .req v13\n" "qV21A .req q14\n" "qV11B .req q14\n"
-      "vV21A .req v14\n" "vV11B .req v14\n" "qW32A .req q15\n" "qW32B .req q15\n"
-      "vW32A .req v15\n" "vW32B .req v15\n" "qW31A .req q16\n" "qV22B .req q16\n"
-      "vW31A .req v16\n" "vV22B .req v16\n"
-      "qW11A .req q17\n" "vW11A .req v17\n" "qW13B .req q18\n" "qU14A .req q18\n"
-      "vW13B .req v18\n" "vU14A .req v18\n" "qU33A .req q19\n" "qW33B .req q19\n"
-      "vU33A .req v19\n" "vW33B .req v19\n" "qW22A .req q20\n" "qU23B .req q20\n"
-      "vW22A .req v20\n" "vU23B .req v20\n" "qU12A .req q21\n" "qU42A .req q21\n"
-      "vU12A .req v21\n" "vU42A .req v21\n" "qU41A .req q22\n" "qU42B .req q22\n"
-      "vU41A .req v22\n" "vU42B .req v22\n" "qW23A .req q23\n" "qW23B .req q23\n"
-      "vW23A .req v23\n" "vW23B .req v23\n" "qU43B .req q24\n" "qU11A .req q24\n"
-      "vU43B .req v24\n" "vU11A .req v24\n" "qU12B .req q25\n" "qW12A .req q25\n"
-      "vU12B .req v25\n" "vW12A .req v25\n" "qU41B .req q26\n" "qV11A .req q26\n"
-      "vU41B .req v26\n" "vV11A .req v26\n" "qW22B .req q27\n" "vW22B .req v27\n"
-      "qU11B .req q28\n" "qU14B .req q28\n" "vU11B .req v28\n" "vU14B .req v28\n"
-      "qU21A .req q29\n" "vU21A .req v29\n"
-
-      "u_col_stride1 .req %x[u_col_stride]\n"
-      "u_col_stride2 .req x0\n"
-      "u_col_stride3 .req x1\n"
-      "uptr1 .req x2\n"
-      "uptr2 .req x3\n"
-      "uptr3 .req x4\n"
-      "wptr1 .req x5\n"
-      "wptr2 .req x6\n"
-      "vptr1 .req x7\n"
-      "w_col_stride1 .req %x[w_col_stride]\n"
-      "w_col_stride2 .req x8\n"
-
-      // Prepare strides and pointers
-      "add uptr1, %x[uptr0], %x[u_row_stride]\n"
-      "add uptr2,    uptr1 , %x[u_row_stride]\n"
-      "add uptr3,    uptr2 , %x[u_row_stride]\n"
-      "add wptr1, %x[wptr0], %x[w_row_stride]\n"
-      "add wptr2,    wptr1 , %x[w_row_stride]\n"
-      "add vptr1, %x[vptr0], %x[v_row_stride]\n"
-      "add u_col_stride2, %x[u_col_stride], %x[u_col_stride]\n"
-      "add u_col_stride3,    u_col_stride2 , %x[u_col_stride]\n"
-      "add w_col_stride2, %x[w_col_stride], %x[w_col_stride]\n"
-
-      // Load in preparation for execution
-      "ldr qU14A, [%x[uptr0], u_col_stride3]\n"
-      "ldr qW13A, [%x[wptr0], w_col_stride2]\n"
-      "ldr qU13A, [%x[uptr0], u_col_stride2]\n"
-      "ldr qW12A, [%x[wptr0], w_col_stride1]\n"
-      "ldr qU12A, [%x[uptr0], u_col_stride1]\n"
-      "ldr qW11A, [%x[wptr0]], #0x10\n"
-      "ldr qU24A, [uptr1, u_col_stride3]\n"
-      "ldr qW23A, [wptr1, w_col_stride2]\n"
-      "ldr qU23A, [uptr1, u_col_stride2]\n"
-      "ldr qW22A, [wptr1, w_col_stride1]\n"
-      "ldr qU22A, [uptr1, u_col_stride1]\n"
-      "ldr qW21A, [wptr1], #0x10\n"
-      "ldr qU34A, [uptr2, u_col_stride3]\n"
-      "ldr qW33A, [wptr2, w_col_stride2]\n"
-      "ldr qU33A, [uptr2, u_col_stride2]\n"
-      "ldr qW32A, [wptr2, w_col_stride1]\n"
-      "ldr qU32A, [uptr2, u_col_stride1]\n"
-      "ldr qW31A, [wptr2], #0x10\n"
-      "fmul vV12A.4s, vU14A.4s, vW13A.4s\n"
-      "cbz %x[iters], 2f\n"  // Jump to tail if doing zero iterations of loop
-
-      "1:"  // Main loop body
-        // A part
-        "fmul vV11A.4s, vU13A.4s, vW13A.4s\n"
-        "fmla vV12A.4s, vU13A.4s, vW12A.4s\n"
-        "fmla vV11A.4s, vU12A.4s, vW12A.4s\n"
-        "fmla vV12A.4s, vU12A.4s, vW11A.4s\n"
-        "fmla vV12A.4s, vU24A.4s, vW23A.4s\n"
-        "fmul vV22A.4s, vU24A.4s, vW13A.4s\n"
-        "fmla vV11A.4s, vU23A.4s, vW23A.4s\n"
-        "ldr qU44A, [uptr3, u_col_stride3]\n"
-        "fmla vV12A.4s, vU23A.4s, vW22A.4s\n"
-        "ldr qU43A, [uptr3, u_col_stride2]\n"
-        "fmul vV21A.4s, vU23A.4s, vW13A.4s\n"
-        "ldr qU42A, [uptr3, u_col_stride1]\n"
-        "fmla vV22A.4s, vU23A.4s, vW12A.4s\n"
-        "ldr qU11A, [%x[uptr0]], #0x10\n"
-        "fmla vV11A.4s, vU22A.4s, vW22A.4s\n"
-        "ldr qU21A, [uptr1], #0x10\n"
-        "fmla vV12A.4s, vU22A.4s, vW21A.4s\n"
-        "ldr qU31A, [uptr2], #0x10\n"
-        "fmla vV21A.4s, vU22A.4s, vW12A.4s\n"
-        "ldr qU41A, [uptr3], #0x10\n"
-        "fmla vV22A.4s, vU22A.4s, vW11A.4s\n"
-        "ldr qU14B, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV12A.4s, vU34A.4s, vW33A.4s\n"
-        "ldr qW13B, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV22A.4s, vU34A.4s, vW23A.4s\n"
-        "ldr qU13B, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV11A.4s, vU33A.4s, vW33A.4s\n"
-        "ldr qW12B, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV12A.4s, vU33A.4s, vW32A.4s\n"
-        "ldr qU12B, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV21A.4s, vU33A.4s, vW23A.4s\n"
-        "ldr qW11B, [%x[wptr0]], #0x10\n"
-        "fmla vV22A.4s, vU33A.4s, vW22A.4s\n"
-        "ldr qU24B, [uptr1, u_col_stride3]\n"
-        "fmla vV11A.4s, vU32A.4s, vW32A.4s\n"
-        "ldr qW23B, [wptr1, w_col_stride2]\n"
-        "fmla vV12A.4s, vU32A.4s, vW31A.4s\n"
-        "str qV12A, [%x[vptr0], %x[v_col_stride]]\n"
-        "fmla vV21A.4s, vU32A.4s, vW22A.4s\n"
-        "ldr qU23B, [uptr1, u_col_stride2]\n"
-        "fmla vV22A.4s, vU32A.4s, vW21A.4s\n"
-        "ldr qW22B, [wptr1, w_col_stride1]\n"
-        "fmla vV22A.4s, vU44A.4s, vW33A.4s\n"
-        "ldr qU22B, [uptr1, u_col_stride1]\n"
-        "fmla vV21A.4s, vU43A.4s, vW33A.4s\n"
-        "ldr qW21B, [wptr1], #0x10\n"
-        "fmla vV22A.4s, vU43A.4s, vW32A.4s\n"
-        "ldr qU34B, [uptr2, u_col_stride3]\n"
-        "fmla vV21A.4s, vU42A.4s, vW32A.4s\n"
-        "ldr qW33B, [wptr2, w_col_stride2]\n"
-        "fmla vV22A.4s, vU42A.4s, vW31A.4s\n"
-        "str qV22A, [vptr1, %x[v_col_stride]]\n"
-        "fmla vV11A.4s, vU11A.4s, vW11A.4s\n"
-        "ldr qU33B, [uptr2, u_col_stride2]\n"
-        "fmla vV11A.4s, vU21A.4s, vW21A.4s\n"
-        "ldr qW32B, [wptr2, w_col_stride1]\n"
-        "fmla vV21A.4s, vU21A.4s, vW11A.4s\n"
-        "ldr qU32B, [uptr2, u_col_stride1]\n"
-        "fmla vV11A.4s, vU31A.4s, vW31A.4s\n"
-        "str qV11A, [%x[vptr0]], #0x10\n"
-        "fmla vV21A.4s, vU31A.4s, vW21A.4s\n"
-        "ldr qW31B, [wptr2], #0x10\n"
-        "fmla vV21A.4s, vU41A.4s, vW31A.4s\n"
-        "str qV21A, [vptr1], #0x10\n"
-
-        // B part
-        "fmul vV12B.4s, vU14B.4s, vW13B.4s\n"
-        "fmul vV11B.4s, vU13B.4s, vW13B.4s\n"
-        "fmla vV12B.4s, vU13B.4s, vW12B.4s\n"
-        "fmla vV11B.4s, vU12B.4s, vW12B.4s\n"
-        "fmla vV12B.4s, vU12B.4s, vW11B.4s\n"
-        "fmla vV12B.4s, vU24B.4s, vW23B.4s\n"
-        "fmul vV22B.4s, vU24B.4s, vW13B.4s\n"
-        "subs %x[iters], %x[iters], #1\n"
-        "fmla vV11B.4s, vU23B.4s, vW23B.4s\n"
-        "ldr qU44B, [uptr3, u_col_stride3]\n"
-        "fmla vV12B.4s, vU23B.4s, vW22B.4s\n"
-        "ldr qU43B, [uptr3, u_col_stride2]\n"
-        "fmul vV21B.4s, vU23B.4s, vW13B.4s\n"
-        "ldr qU42B, [uptr3, u_col_stride1]\n"
-        "fmla vV22B.4s, vU23B.4s, vW12B.4s\n"
-        "ldr qU11B, [%x[uptr0]], #0x10\n"
-        "fmla vV11B.4s, vU22B.4s, vW22B.4s\n"
-        "ldr qU21B, [uptr1], #0x10\n"
-        "fmla vV12B.4s, vU22B.4s, vW21B.4s\n"
-        "ldr qU31B, [uptr2], #0x10\n"
-        "fmla vV21B.4s, vU22B.4s, vW12B.4s\n"
-        "ldr qU41B, [uptr3], #0x10\n"
-        "fmla vV22B.4s, vU22B.4s, vW11B.4s\n"
-        "ldr qU14A, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV12B.4s, vU34B.4s, vW33B.4s\n"
-        "ldr qW13A, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV22B.4s, vU34B.4s, vW23B.4s\n"
-        "ldr qU13A, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV11B.4s, vU33B.4s, vW33B.4s\n"
-        "ldr qW12A, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV12B.4s, vU33B.4s, vW32B.4s\n"
-        "ldr qU12A, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV21B.4s, vU33B.4s, vW23B.4s\n"
-        "ldr qW11A, [%x[wptr0]], #0x10\n"
-        "fmla vV22B.4s, vU33B.4s, vW22B.4s\n"
-        "ldr qU24A, [uptr1, u_col_stride3]\n"
-        "fmla vV11B.4s, vU32B.4s, vW32B.4s\n"
-        "ldr qW23A, [wptr1, w_col_stride2]\n"
-        "fmla vV12B.4s, vU32B.4s, vW31B.4s\n"
-        "str qV12B, [%x[vptr0], %x[v_col_stride]]\n"
-        "fmla vV21B.4s, vU32B.4s, vW22B.4s\n"
-        "ldr qU23A, [uptr1, u_col_stride2]\n"
-        "fmla vV22B.4s, vU32B.4s, vW21B.4s\n"
-        "ldr qW22A, [wptr1, w_col_stride1]\n"
-        "fmla vV22B.4s, vU44B.4s, vW33B.4s\n"
-        "ldr qU22A, [uptr1, u_col_stride1]\n"
-        "fmla vV21B.4s, vU43B.4s, vW33B.4s\n"
-        "ldr qW21A, [wptr1], #0x10\n"
-        "fmla vV22B.4s, vU43B.4s, vW32B.4s\n"
-        "ldr qU34A, [uptr2, u_col_stride3]\n"
-        "fmla vV21B.4s, vU42B.4s, vW32B.4s\n"
-        "ldr qW33A, [wptr2, w_col_stride2]\n"
-        "fmla vV22B.4s, vU42B.4s, vW31B.4s\n"
-        "str qV22B, [vptr1, %x[v_col_stride]]\n"
-        "fmla vV11B.4s, vU11B.4s, vW11B.4s\n"
-        "ldr qU33A, [uptr2, u_col_stride2]\n"
-        "fmla vV11B.4s, vU21B.4s, vW21B.4s\n"
-        "ldr qW32A, [wptr2, w_col_stride1]\n"
-        "fmla vV21B.4s, vU21B.4s, vW11B.4s\n"
-        "ldr qU32A, [uptr2, u_col_stride1]\n"
-        "fmla vV11B.4s, vU31B.4s, vW31B.4s\n"
-        "str qV11B, [%x[vptr0]], #0x10\n"
-        "fmla vV21B.4s, vU31B.4s, vW21B.4s\n"
-        "ldr qW31A, [wptr2], #0x10\n"
-        "fmla vV21B.4s, vU41B.4s, vW31B.4s\n"
-        "str qV21B, [vptr1], #0x10\n"
-        "fmul vV12A.4s, vU14A.4s, vW13A.4s\n"
-        "bne 1b\n"  // Loop
-
-      "2:"  // Branch destination for zero loops
-        "cbnz %w[odd_tail], 4f\n"
-
-      "3:"  // Even number of iterations
-        "fmul vV11A.4s, vU13A.4s, vW13A.4s\n"
-        "fmla vV12A.4s, vU13A.4s, vW12A.4s\n"
-        "fmla vV11A.4s, vU12A.4s, vW12A.4s\n"
-        "fmla vV12A.4s, vU12A.4s, vW11A.4s\n"
-        "fmla vV12A.4s, vU24A.4s, vW23A.4s\n"
-        "fmul vV22A.4s, vU24A.4s, vW13A.4s\n"
-        "fmla vV11A.4s, vU23A.4s, vW23A.4s\n"
-        "ldr qU44A, [uptr3, u_col_stride3]\n"
-        "fmla vV12A.4s, vU23A.4s, vW22A.4s\n"
-        "ldr qU43A, [uptr3, u_col_stride2]\n"
-        "fmul vV21A.4s, vU23A.4s, vW13A.4s\n"
-        "ldr qU42A, [uptr3, u_col_stride1]\n"
-        "fmla vV22A.4s, vU23A.4s, vW12A.4s\n"
-        "ldr qU11A, [%x[uptr0]], #0x10\n"
-        "fmla vV11A.4s, vU22A.4s, vW22A.4s\n"
-        "ldr qU21A, [uptr1], #0x10\n"
-        "fmla vV12A.4s, vU22A.4s, vW21A.4s\n"
-        "ldr qU31A, [uptr2], #0x10\n"
-        "fmla vV21A.4s, vU22A.4s, vW12A.4s\n"
-        "ldr qU41A, [uptr3], #0x10\n"
-        "fmla vV22A.4s, vU22A.4s, vW11A.4s\n"
-        "ldr qU14B, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV12A.4s, vU34A.4s, vW33A.4s\n"
-        "ldr qW13B, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV22A.4s, vU34A.4s, vW23A.4s\n"
-        "ldr qU13B, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV11A.4s, vU33A.4s, vW33A.4s\n"
-        "ldr qW12B, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV12A.4s, vU33A.4s, vW32A.4s\n"
-        "ldr qU12B, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV21A.4s, vU33A.4s, vW23A.4s\n"
-        "ldr qW11B, [%x[wptr0]], #0x10\n"
-        "fmla vV22A.4s, vU33A.4s, vW22A.4s\n"
-        "ldr qU24B, [uptr1, u_col_stride3]\n"
-        "fmla vV11A.4s, vU32A.4s, vW32A.4s\n"
-        "ldr qW23B, [wptr1, w_col_stride2]\n"
-        "fmla vV12A.4s, vU32A.4s, vW31A.4s\n"
-        "str qV12A, [%x[vptr0], %x[v_col_stride]]\n"
-        "fmla vV21A.4s, vU32A.4s, vW22A.4s\n"
-        "ldr qU23B, [uptr1, u_col_stride2]\n"
-        "fmla vV22A.4s, vU32A.4s, vW21A.4s\n"
-        "ldr qW22B, [wptr1, w_col_stride1]\n"
-        "fmla vV22A.4s, vU44A.4s, vW33A.4s\n"
-        "ldr qU22B, [uptr1, u_col_stride1]\n"
-        "fmla vV21A.4s, vU43A.4s, vW33A.4s\n"
-        "ldr qW21B, [wptr1], #0x10\n"
-        "fmla vV22A.4s, vU43A.4s, vW32A.4s\n"
-        "ldr qU34B, [uptr2, u_col_stride3]\n"
-        "fmla vV21A.4s, vU42A.4s, vW32A.4s\n"
-        "ldr qW33B, [wptr2, w_col_stride2]\n"
-        "fmla vV22A.4s, vU42A.4s, vW31A.4s\n"
-        "str qV22A, [vptr1, %x[v_col_stride]]\n"
-        "fmla vV11A.4s, vU11A.4s, vW11A.4s\n"
-        "ldr qU33B, [uptr2, u_col_stride2]\n"
-        "fmla vV11A.4s, vU21A.4s, vW21A.4s\n"
-        "ldr qW32B, [wptr2, w_col_stride1]\n"
-        "fmla vV21A.4s, vU21A.4s, vW11A.4s\n"
-        "ldr qU32B, [uptr2, u_col_stride1]\n"
-        "fmla vV11A.4s, vU31A.4s, vW31A.4s\n"
-        "str qV11A, [%x[vptr0]], #0x10\n"
-        "fmla vV21A.4s, vU31A.4s, vW21A.4s\n"
-        "ldr qW31B, [wptr2], #0x10\n"
-        "fmla vV21A.4s, vU41A.4s, vW31A.4s\n"
-        "str qV21A, [vptr1], #0x10\n"
-
-        "fmul vV12B.4s, vU14B.4s, vW13B.4s\n"
-        "fmul vV11B.4s, vU13B.4s, vW13B.4s\n"
-        "fmla vV12B.4s, vU13B.4s, vW12B.4s\n"
-        "fmla vV11B.4s, vU12B.4s, vW12B.4s\n"
-        "fmla vV12B.4s, vU12B.4s, vW11B.4s\n"
-        "fmla vV12B.4s, vU24B.4s, vW23B.4s\n"
-        "fmul vV22B.4s, vU24B.4s, vW13B.4s\n"
-        "fmla vV11B.4s, vU23B.4s, vW23B.4s\n"
-        "ldr qU44B, [uptr3, u_col_stride3]\n"
-        "fmla vV12B.4s, vU23B.4s, vW22B.4s\n"
-        "ldr qU43B, [uptr3, u_col_stride2]\n"
-        "fmul vV21B.4s, vU23B.4s, vW13B.4s\n"
-        "ldr qU42B, [uptr3, u_col_stride1]\n"
-        "fmla vV22B.4s, vU23B.4s, vW12B.4s\n"
-        "ldr qU11B, [%x[uptr0]], #0x10\n"
-        "fmla vV11B.4s, vU22B.4s, vW22B.4s\n"
-        "ldr qU21B, [uptr1], #0x10\n"
-        "fmla vV12B.4s, vU22B.4s, vW21B.4s\n"
-        "ldr qU31B, [uptr2], #0x10\n"
-        "fmla vV21B.4s, vU22B.4s, vW12B.4s\n"
-        "ldr qU41B, [uptr3], #0x10\n"
-        "fmla vV22B.4s, vU22B.4s, vW11B.4s\n"
-        "fmla vV12B.4s, vU34B.4s, vW33B.4s\n"
-        "fmla vV22B.4s, vU34B.4s, vW23B.4s\n"
-        "fmla vV11B.4s, vU33B.4s, vW33B.4s\n"
-        "fmla vV12B.4s, vU33B.4s, vW32B.4s\n"
-        "fmla vV21B.4s, vU33B.4s, vW23B.4s\n"
-        "fmla vV22B.4s, vU33B.4s, vW22B.4s\n"
-        "fmla vV11B.4s, vU32B.4s, vW32B.4s\n"
-        "fmla vV12B.4s, vU32B.4s, vW31B.4s\n"
-        "str qV12B, [%x[vptr0], %x[v_col_stride]]\n"
-        "fmla vV21B.4s, vU32B.4s, vW22B.4s\n"
-        "fmla vV22B.4s, vU32B.4s, vW21B.4s\n"
-        "fmla vV22B.4s, vU44B.4s, vW33B.4s\n"
-        "fmla vV21B.4s, vU43B.4s, vW33B.4s\n"
-        "fmla vV22B.4s, vU43B.4s, vW32B.4s\n"
-        "fmla vV21B.4s, vU42B.4s, vW32B.4s\n"
-        "fmla vV22B.4s, vU42B.4s, vW31B.4s\n"
-        "str qV22B, [vptr1, %x[v_col_stride]]\n"
-        "fmla vV11B.4s, vU11B.4s, vW11B.4s\n"
-        "fmla vV11B.4s, vU21B.4s, vW21B.4s\n"
-        "fmla vV21B.4s, vU21B.4s, vW11B.4s\n"
-        "fmla vV11B.4s, vU31B.4s, vW31B.4s\n"
-        "str qV11B, [%x[vptr0]], #0x10\n"
-        "fmla vV21B.4s, vU31B.4s, vW21B.4s\n"
-        "fmla vV21B.4s, vU41B.4s, vW31B.4s\n"
-        "str qV21B, [vptr1], #0x10\n"
-        "b 5f\n"
-
-      "4:"  // Odd number of iterations
-        "fmul vV11A.4s, vU13A.4s, vW13A.4s\n"
-        "fmla vV12A.4s, vU13A.4s, vW12A.4s\n"
-        "fmla vV11A.4s, vU12A.4s, vW12A.4s\n"
-        "fmla vV12A.4s, vU12A.4s, vW11A.4s\n"
-        "fmla vV12A.4s, vU24A.4s, vW23A.4s\n"
-        "fmul vV22A.4s, vU24A.4s, vW13A.4s\n"
-        "fmla vV11A.4s, vU23A.4s, vW23A.4s\n"
-        "ldr qU44A, [uptr3, u_col_stride3]\n"
-        "fmla vV12A.4s, vU23A.4s, vW22A.4s\n"
-        "ldr qU43A, [uptr3, u_col_stride2]\n"
-        "fmul vV21A.4s, vU23A.4s, vW13A.4s\n"
-        "ldr qU42A, [uptr3, u_col_stride1]\n"
-        "fmla vV22A.4s, vU23A.4s, vW12A.4s\n"
-        "ldr qU11A, [%x[uptr0]], #0x10\n"
-        "fmla vV11A.4s, vU22A.4s, vW22A.4s\n"
-        "ldr qU21A, [uptr1], #0x10\n"
-        "fmla vV12A.4s, vU22A.4s, vW21A.4s\n"
-        "ldr qU31A, [uptr2], #0x10\n"
-        "fmla vV21A.4s, vU22A.4s, vW12A.4s\n"
-        "ldr qU41A, [uptr3], #0x10\n"
-        "fmla vV22A.4s, vU22A.4s, vW11A.4s\n"
-        "fmla vV12A.4s, vU34A.4s, vW33A.4s\n"
-        "fmla vV22A.4s, vU34A.4s, vW23A.4s\n"
-        "fmla vV11A.4s, vU33A.4s, vW33A.4s\n"
-        "fmla vV12A.4s, vU33A.4s, vW32A.4s\n"
-        "fmla vV21A.4s, vU33A.4s, vW23A.4s\n"
-        "fmla vV22A.4s, vU33A.4s, vW22A.4s\n"
-        "fmla vV11A.4s, vU32A.4s, vW32A.4s\n"
-        "fmla vV12A.4s, vU32A.4s, vW31A.4s\n"
-        "str qV12A, [%x[vptr0], %x[v_col_stride]]\n"
-        "fmla vV21A.4s, vU32A.4s, vW22A.4s\n"
-        "fmla vV22A.4s, vU32A.4s, vW21A.4s\n"
-        "fmla vV22A.4s, vU44A.4s, vW33A.4s\n"
-        "fmla vV21A.4s, vU43A.4s, vW33A.4s\n"
-        "fmla vV22A.4s, vU43A.4s, vW32A.4s\n"
-        "fmla vV21A.4s, vU42A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU42A.4s, vW31A.4s\n"
-        "str qV22A, [vptr1, %x[v_col_stride]]\n"
-        "fmla vV11A.4s, vU11A.4s, vW11A.4s\n"
-        "fmla vV11A.4s, vU21A.4s, vW21A.4s\n"
-        "fmla vV21A.4s, vU21A.4s, vW11A.4s\n"
-        "fmla vV11A.4s, vU31A.4s, vW31A.4s\n"
-        "str qV11A, [%x[vptr0]], #0x10\n"
-        "fmla vV21A.4s, vU31A.4s, vW21A.4s\n"
-        "fmla vV21A.4s, vU41A.4s, vW31A.4s\n"
-        "str qV21A, [vptr1], #0x10\n"
-
-      "5:"  // End of method
-
-      ".unreq qW11B\n" ".unreq qW33A\n" ".unreq qU32B\n"
-      ".unreq qU44B\n" ".unreq qW21A\n" ".unreq qU21B\n" ".unreq qU32A\n"
-      ".unreq qU43A\n" ".unreq qV21B\n"
-      ".unreq qU24A\n" ".unreq qU44A\n" ".unreq qU33B\n"
-      ".unreq qU31A\n" ".unreq qV12B\n" ".unreq qU23A\n"
-      ".unreq qW31B\n" ".unreq qV22A\n" ".unreq qV12A\n" ".unreq qW21B\n"
-      ".unreq qU22B\n" ".unreq qU34A\n" ".unreq qU13B\n" ".unreq qU13A\n"
-      ".unreq qU34B\n" ".unreq qU22A\n" ".unreq qU24B\n" ".unreq qU31B\n"
-      ".unreq qW12B\n" ".unreq qW13A\n" ".unreq qV21A\n" ".unreq qV11B\n"
-      ".unreq qW32A\n" ".unreq qW32B\n" ".unreq qW31A\n" ".unreq qV22B\n"
-      ".unreq qW11A\n" ".unreq qW13B\n" ".unreq qU14A\n"
-      ".unreq qU33A\n" ".unreq qW33B\n" ".unreq qW22A\n" ".unreq qU23B\n"
-      ".unreq qU12A\n" ".unreq qU42A\n" ".unreq qU41A\n" ".unreq qU42B\n"
-      ".unreq qW23A\n" ".unreq qW23B\n" ".unreq qU43B\n" ".unreq qU11A\n"
-      ".unreq qU12B\n" ".unreq qW12A\n" ".unreq qU41B\n" ".unreq qV11A\n"
-      ".unreq qW22B\n" ".unreq qU11B\n" ".unreq qU14B\n" ".unreq qU21A\n"
-      ".unreq vW11B\n" ".unreq vW33A\n" ".unreq vU32B\n"
-      ".unreq vU44B\n" ".unreq vW21A\n" ".unreq vU21B\n" ".unreq vU32A\n"
-      ".unreq vU43A\n" ".unreq vV21B\n"
-      ".unreq vU24A\n" ".unreq vU44A\n" ".unreq vU33B\n"
-      ".unreq vU31A\n" ".unreq vV12B\n" ".unreq vU23A\n"
-      ".unreq vW31B\n" ".unreq vV22A\n" ".unreq vV12A\n" ".unreq vW21B\n"
-      ".unreq vU22B\n" ".unreq vU34A\n" ".unreq vU13B\n" ".unreq vU13A\n"
-      ".unreq vU34B\n" ".unreq vU22A\n" ".unreq vU24B\n" ".unreq vU31B\n"
-      ".unreq vW12B\n" ".unreq vW13A\n" ".unreq vV21A\n" ".unreq vV11B\n"
-      ".unreq vW32A\n" ".unreq vW32B\n" ".unreq vW31A\n" ".unreq vV22B\n"
-      ".unreq vW11A\n" ".unreq vW13B\n" ".unreq vU14A\n"
-      ".unreq vU33A\n" ".unreq vW33B\n" ".unreq vW22A\n" ".unreq vU23B\n"
-      ".unreq vU12A\n" ".unreq vU42A\n" ".unreq vU41A\n" ".unreq vU42B\n"
-      ".unreq vW23A\n" ".unreq vW23B\n" ".unreq vU43B\n" ".unreq vU11A\n"
-      ".unreq vU12B\n" ".unreq vW12A\n" ".unreq vU41B\n" ".unreq vV11A\n"
-      ".unreq vW22B\n" ".unreq vU11B\n" ".unreq vU14B\n" ".unreq vU21A\n"
-      ".unreq u_col_stride1\n" ".unreq u_col_stride2\n"
-      ".unreq u_col_stride3\n"
-      ".unreq uptr1\n" ".unreq uptr2\n" ".unreq uptr3\n"
-      ".unreq wptr1\n" ".unreq wptr2\n" ".unreq vptr1\n"
-      ".unreq w_col_stride1\n" ".unreq w_col_stride2\n"
-
-      : [uptr0] "+r" (uptr0), [vptr0] "+r" (vptr0), [wptr0] "+r" (wptr0),
-        [iters] "+r" (n_iters)
-      : [u_row_stride] "r" (in_row_stride * sizeof(float)),
-        [u_col_stride] "r" (in_col_stride * sizeof(float)),
-        [v_row_stride] "r" (out_row_stride * sizeof(float)),
-        [v_col_stride] "r" (out_col_stride * sizeof(float)),
-        [w_row_stride] "r" (weight_row_stride * sizeof(float)),
-        [w_col_stride] "r" (weight_col_stride * sizeof(float)),
-        [odd_tail] "r" (odd_tail)
-      : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "cc",
-        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
-        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
-        "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "memory"
-    );
-  }
-
-  if (channels_remaining)
-  {
-    // Fall back on the unoptimised version to clean up the tail
-    ConvImpl::process_tile<false>(
-        channels_remaining,
-        wptr0, weight_row_stride, weight_col_stride,
-        uptr0, in_row_stride, in_col_stride,
-        vptr0, out_row_stride, out_col_stride,
-        0, 0, 0, 0, 0, 0
-    );
-  }
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU6>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
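+  // Illustrative scalar view of the tile this assembly computes (a sketch,
+  // not part of the generated kernel): for each channel c and each output
+  // point (i, j) of the 2x2 tile,
+  //   acc = bias[c] + sum over 3x3 taps (ku, kv) of w[ku][kv][c] * in[i + ku][j + kv][c]
+  //   out[i][j][c] = fmin(fmax(acc, 0.0f), 6.0f)   // ReLU6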
+  __asm __volatile(
+    "add x21, %[inptr0], %[input_row_stride]\n"
+    "add x23, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x24, %[outptr0], %[output_row_stride]\n"
+    "add x27, x21, %[input_row_stride]\n"
+    "add x22, x23, %[input_col_stride1]\n"
+    "and x25, %[n_channels], #3\n"
+    "add x28, x27, %[input_row_stride]\n"
+    "lsr x26, %[n_channels], #2\n"
+    "cbz x26, 4f\n"
+    "1:\n"
+    "ldr q19, [%[wbptr]]\n"
+    "subs x26, x26, #1\n"
+    "mov v3.16b, v19.16b\n"
+    "ldr q12, [%[wbptr], #16]\n"
+    "mov v1.16b, v19.16b\n"
+    "ldr q11, [%[wbptr], #32]\n"
+    "mov v2.16b, v19.16b\n"
+    "ldr q10, [%[wbptr], #48]\n"
+    "mov v0.16b, v19.16b\n"
+    "ldr q13, [%[wbptr], #64]\n"
+    "ldr q23, [%[wbptr], #80]\n"
+    "ldr q15, [%[wbptr], #96]\n"
+    "ldr q20, [%[wbptr], #112]\n"
+    "ldr q21, [%[wbptr], #128]\n"
+    "ldr q14, [%[wbptr], #144]\n"
+    "ldr q16, [%[inptr0]]\n"
+    "fmla v3.4s, v16.4s, v12.4s\n"
+    "ldr q28, [x21]\n"
+    "fmla v1.4s, v28.4s, v12.4s\n"
+    "ldr q22, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "ldr q24, [x27]\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "ldr q8, [x21, %[input_col_stride1]]\n"
+    "ldr q9, [%[inptr0], x23]\n"
+    "ldr q18, [x28]\n"
+    "ldr q6, [x27, %[input_col_stride1]]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v3.4s, v24.4s, v20.4s\n"
+    "ldr q25, [x21, x23]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "ldr q28, [%[inptr0], x22]\n"
+    "fmla v2.4s, v8.4s, v13.4s\n"
+    "ldr q24, [x28, %[input_col_stride1]]\n"
+    "fmla v3.4s, v8.4s, v23.4s\n"
+    "ldr q27, [x27, x23]\n"
+    "fmla v1.4s, v8.4s, v11.4s\n"
+    "ldr q7, [x21, x22]\n"
+    "fmla v0.4s, v8.4s, v12.4s\n"
+    "ldr q17, [x28, x23]\n"
+    "fmla v3.4s, v9.4s, v10.4s\n"
+    "ldr q5, [x27, x22]\n"
+    "fmla v2.4s, v9.4s, v11.4s\n"
+    "ldr q4, [x28, x22]\n"
+    "fmla v1.4s, v18.4s, v20.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v3.4s, v6.4s, v21.4s\n"
+    "ldr q19, [%[wbptr]]\n"
+    "fmla v2.4s, v6.4s, v20.4s\n"
+    "ldr q12, [%[wbptr], #16]\n"
+    "fmla v1.4s, v6.4s, v23.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v6.4s, v13.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v3.4s, v25.4s, v15.4s\n"
+    "ldr q16, [%[inptr0]]\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "ldr q22, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v25.4s, v23.4s\n"
+    "ldr q13, [%[wbptr], #64]\n"
+    "fmla v0.4s, v25.4s, v11.4s\n"
+    "ldr q9, [%[inptr0], x23]\n"
+    "fmla v1.4s, v24.4s, v21.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "ldr q28, [x21]\n"
+    "fmla v0.4s, v24.4s, v20.4s\n"
+    "ldr q11, [%[wbptr], #32]\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "ldr q8, [x21, %[input_col_stride1]]\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v2.4s, v27.4s, v21.4s\n"
+    "ldr q20, [%[wbptr], #112]\n"
+    "fmla v0.4s, v27.4s, v23.4s\n"
+    "ldr q24, [x27]\n"
+    "fmla v1.4s, v17.4s, v14.4s\n"
+    "ldr q6, [x27, %[input_col_stride1]]\n"
+    "fmla v2.4s, v7.4s, v15.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v0.4s, v7.4s, v10.4s\n"
+    "ldr q23, [%[wbptr], #80]\n"
+    "movi v25.16b, #0\n"
+    "ldr q18, [x28]\n"
+    "fmla v2.4s, v5.4s, v14.4s\n"
+    "subs x26, x26, #1\n"
+    "fmla v0.4s, v17.4s, v21.4s\n"
+    "ldr q10, [%[wbptr], #48]\n"
+    "fmov v26.4s, #6.0\n"
+    "fmax v3.4s, v3.4s, v25.4s\n"
+    "fmax v2.4s, v2.4s, v25.4s\n"
+    "fmax v1.4s, v1.4s, v25.4s\n"
+    "fmla v0.4s, v5.4s, v15.4s\n"
+    "ldr q21, [%[wbptr], #128]\n"
+    "fmin v3.4s, v3.4s, v26.4s\n"
+    "fmin v2.4s, v2.4s, v26.4s\n"
+    "fmin v1.4s, v1.4s, v26.4s\n"
+    "str q3, [%[outptr0]]\n"
+    "str q2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v4.4s, v14.4s\n"
+    "str q1, [x24]\n"
+    "mov v3.16b, v19.16b\n"
+    "mov v1.16b, v19.16b\n"
+    "ldr q15, [%[wbptr], #96]\n"
+    "fmax v0.4s, v0.4s, v25.4s\n"
+    "ldr q14, [%[wbptr], #144]\n"
+    "mov v2.16b, v19.16b\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmin v0.4s, v0.4s, v26.4s\n"
+    "fmla v3.4s, v16.4s, v12.4s\n"
+    "fmla v1.4s, v28.4s, v12.4s\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "str q0, [x24, %[output_col_stride1]]\n"
+    "mov v0.16b, v19.16b\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v3.4s, v24.4s, v20.4s\n"
+    "ldr q25, [x21, x23]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "ldr q28, [%[inptr0], x22]\n"
+    "fmla v2.4s, v8.4s, v13.4s\n"
+    "ldr q24, [x28, %[input_col_stride1]]\n"
+    "fmla v3.4s, v8.4s, v23.4s\n"
+    "ldr q27, [x27, x23]\n"
+    "fmla v1.4s, v8.4s, v11.4s\n"
+    "ldr q7, [x21, x22]\n"
+    "fmla v0.4s, v8.4s, v12.4s\n"
+    "ldr q17, [x28, x23]\n"
+    "fmla v3.4s, v9.4s, v10.4s\n"
+    "ldr q5, [x27, x22]\n"
+    "fmla v2.4s, v9.4s, v11.4s\n"
+    "ldr q4, [x28, x22]\n"
+    "fmla v1.4s, v18.4s, v20.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v3.4s, v6.4s, v21.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v2.4s, v6.4s, v20.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v1.4s, v6.4s, v23.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v0.4s, v6.4s, v13.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v3.4s, v25.4s, v15.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "fmla v2.4s, v25.4s, v23.4s\n"
+    "fmla v0.4s, v25.4s, v11.4s\n"
+    "movi v25.16b, #0\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "fmov v26.4s, #6.0\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "fmla v1.4s, v24.4s, v21.4s\n"
+    "fmla v0.4s, v24.4s, v20.4s\n"
+    "fmax v3.4s, v3.4s, v25.4s\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "fmla v2.4s, v27.4s, v21.4s\n"
+    "fmla v0.4s, v27.4s, v23.4s\n"
+    "fmin v3.4s, v3.4s, v26.4s\n"
+    "str q3, [%[outptr0]]\n"
+    "fmla v2.4s, v7.4s, v15.4s\n"
+    "fmla v0.4s, v7.4s, v10.4s\n"
+    "fmla v1.4s, v17.4s, v14.4s\n"
+    "fmla v2.4s, v5.4s, v14.4s\n"
+    "fmla v0.4s, v17.4s, v21.4s\n"
+    "fmax v1.4s, v1.4s, v25.4s\n"
+    "fmax v2.4s, v2.4s, v25.4s\n"
+    "fmla v0.4s, v5.4s, v15.4s\n"
+    "fmin v1.4s, v1.4s, v26.4s\n"
+    "fmin v2.4s, v2.4s, v26.4s\n"
+    "str q1, [x24]\n"
+    "str q2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v4.4s, v14.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmax v0.4s, v0.4s, v25.4s\n"
+    "fmin v0.4s, v0.4s, v26.4s\n"
+    "str q0, [x24, %[output_col_stride1]]\n"
+    "add x24, x24, #16\n"
+    "4:\n"
+    "cbz x25, 7f\n"
+    "ldr s19, [%[wbptr]]\n"
+    "mov v3.16b, v19.16b\n"
+    "ldr s12, [%[wbptr], #4]\n"
+    "mov v1.16b, v19.16b\n"
+    "ldr s11, [%[wbptr], #8]\n"
+    "mov v2.16b, v19.16b\n"
+    "ldr s10, [%[wbptr], #12]\n"
+    "mov v0.16b, v19.16b\n"
+    "ldr s13, [%[wbptr], #16]\n"
+    "ldr s23, [%[wbptr], #20]\n"
+    "subs x25, x25, #1\n"
+    "ldr s15, [%[wbptr], #24]\n"
+    "ldr s20, [%[wbptr], #28]\n"
+    "ldr s21, [%[wbptr], #32]\n"
+    "ldr s14, [%[wbptr], #36]\n"
+    "ldr s16, [%[inptr0]]\n"
+    "ldr s28, [x21]\n"
+    "fmla v3.4s, v16.4s, v12.4s\n"
+    "ldr s22, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v1.4s, v28.4s, v12.4s\n"
+    "ldr s24, [x27]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "ldr s8, [x21, %[input_col_stride1]]\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "ldr s9, [%[inptr0], x23]\n"
+    "ldr s18, [x28]\n"
+    "ldr s6, [x27, %[input_col_stride1]]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v3.4s, v24.4s, v20.4s\n"
+    "ldr s25, [x21, x23]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "ldr s28, [%[inptr0], x22]\n"
+    "fmla v2.4s, v8.4s, v13.4s\n"
+    "ldr s24, [x28, %[input_col_stride1]]\n"
+    "fmla v3.4s, v8.4s, v23.4s\n"
+    "ldr s27, [x27, x23]\n"
+    "fmla v1.4s, v8.4s, v11.4s\n"
+    "ldr s7, [x21, x22]\n"
+    "fmla v0.4s, v8.4s, v12.4s\n"
+    "ldr s17, [x28, x23]\n"
+    "fmla v3.4s, v9.4s, v10.4s\n"
+    "ldr s5, [x27, x22]\n"
+    "fmla v2.4s, v9.4s, v11.4s\n"
+    "ldr s4, [x28, x22]\n"
+    "fmla v1.4s, v18.4s, v20.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v3.4s, v6.4s, v21.4s\n"
+    "ldr s19, [%[wbptr]]\n"
+    "fmla v2.4s, v6.4s, v20.4s\n"
+    "ldr s12, [%[wbptr], #4]\n"
+    "fmla v1.4s, v6.4s, v23.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v6.4s, v13.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v3.4s, v25.4s, v15.4s\n"
+    "ldr s16, [%[inptr0]]\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "ldr s22, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v25.4s, v23.4s\n"
+    "ldr s13, [%[wbptr], #16]\n"
+    "fmla v0.4s, v25.4s, v11.4s\n"
+    "ldr s9, [%[inptr0], x23]\n"
+    "fmla v1.4s, v24.4s, v21.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "ldr s28, [x21]\n"
+    "fmla v0.4s, v24.4s, v20.4s\n"
+    "ldr s11, [%[wbptr], #8]\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "ldr s8, [x21, %[input_col_stride1]]\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v2.4s, v27.4s, v21.4s\n"
+    "ldr s20, [%[wbptr], #28]\n"
+    "fmla v0.4s, v27.4s, v23.4s\n"
+    "ldr s24, [x27]\n"
+    "fmla v1.4s, v17.4s, v14.4s\n"
+    "ldr s6, [x27, %[input_col_stride1]]\n"
+    "fmla v2.4s, v7.4s, v15.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v0.4s, v7.4s, v10.4s\n"
+    "ldr s23, [%[wbptr], #20]\n"
+    "movi v25.16b, #0\n"
+    "ldr s18, [x28]\n"
+    "fmla v2.4s, v5.4s, v14.4s\n"
+    "subs x25, x25, #1\n"
+    "fmla v0.4s, v17.4s, v21.4s\n"
+    "ldr s10, [%[wbptr], #12]\n"
+    "fmov v26.4s, #6.0\n"
+    "fmax v3.4s, v3.4s, v25.4s\n"
+    "fmax v2.4s, v2.4s, v25.4s\n"
+    "fmax v1.4s, v1.4s, v25.4s\n"
+    "fmla v0.4s, v5.4s, v15.4s\n"
+    "ldr s21, [%[wbptr], #32]\n"
+    "fmin v3.4s, v3.4s, v26.4s\n"
+    "fmin v2.4s, v2.4s, v26.4s\n"
+    "fmin v1.4s, v1.4s, v26.4s\n"
+    "str s3, [%[outptr0]]\n"
+    "str s2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v4.4s, v14.4s\n"
+    "str s1, [x24]\n"
+    "mov v3.16b, v19.16b\n"
+    "mov v1.16b, v19.16b\n"
+    "ldr s15, [%[wbptr], #24]\n"
+    "fmax v0.4s, v0.4s, v25.4s\n"
+    "ldr s14, [%[wbptr], #36]\n"
+    "mov v2.16b, v19.16b\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmin v0.4s, v0.4s, v26.4s\n"
+    "fmla v3.4s, v16.4s, v12.4s\n"
+    "fmla v1.4s, v28.4s, v12.4s\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "str s0, [x24, %[output_col_stride1]]\n"
+    "mov v0.16b, v19.16b\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v3.4s, v24.4s, v20.4s\n"
+    "ldr s25, [x21, x23]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "ldr s28, [%[inptr0], x22]\n"
+    "fmla v2.4s, v8.4s, v13.4s\n"
+    "ldr s24, [x28, %[input_col_stride1]]\n"
+    "fmla v3.4s, v8.4s, v23.4s\n"
+    "ldr s27, [x27, x23]\n"
+    "fmla v1.4s, v8.4s, v11.4s\n"
+    "ldr s7, [x21, x22]\n"
+    "fmla v0.4s, v8.4s, v12.4s\n"
+    "ldr s17, [x28, x23]\n"
+    "fmla v3.4s, v9.4s, v10.4s\n"
+    "ldr s5, [x27, x22]\n"
+    "fmla v2.4s, v9.4s, v11.4s\n"
+    "ldr s4, [x28, x22]\n"
+    "fmla v1.4s, v18.4s, v20.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v3.4s, v6.4s, v21.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v2.4s, v6.4s, v20.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v1.4s, v6.4s, v23.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v0.4s, v6.4s, v13.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v3.4s, v25.4s, v15.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "fmla v2.4s, v25.4s, v23.4s\n"
+    "fmla v0.4s, v25.4s, v11.4s\n"
+    "movi v25.16b, #0\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "fmov v26.4s, #6.0\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "fmla v1.4s, v24.4s, v21.4s\n"
+    "fmla v0.4s, v24.4s, v20.4s\n"
+    "fmax v3.4s, v3.4s, v25.4s\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "fmla v2.4s, v27.4s, v21.4s\n"
+    "fmla v0.4s, v27.4s, v23.4s\n"
+    "fmin v3.4s, v3.4s, v26.4s\n"
+    "str s3, [%[outptr0]]\n"
+    "fmla v2.4s, v7.4s, v15.4s\n"
+    "fmla v0.4s, v7.4s, v10.4s\n"
+    "fmla v1.4s, v17.4s, v14.4s\n"
+    "fmla v2.4s, v5.4s, v14.4s\n"
+    "fmla v0.4s, v17.4s, v21.4s\n"
+    "fmax v1.4s, v1.4s, v25.4s\n"
+    "fmax v2.4s, v2.4s, v25.4s\n"
+    "fmla v0.4s, v5.4s, v15.4s\n"
+    "fmin v1.4s, v1.4s, v26.4s\n"
+    "fmin v2.4s, v2.4s, v26.4s\n"
+    "str s1, [x24]\n"
+    "str s2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v4.4s, v14.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmax v0.4s, v0.4s, v25.4s\n"
+    "fmin v0.4s, v0.4s, v26.4s\n"
+    "str s0, [x24, %[output_col_stride1]]\n"
+    "add x24, x24, #4\n"
+    "7:\n"
+    : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
 }
 
 #endif  // __aarch64__
 
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
+template class DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float, float>;
 
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-  ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-  ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float>;
 }  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
index 9ce43f9..4ac6276 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,74 +25,1270 @@
 
 namespace depthwise
 {
-using Conv = DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<2, 2, 3, 3, 2, 2, float, float>;
+
+using namespace neon_convolution_kernels;
+using Conv = DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>;
+
+#ifdef __aarch64__
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::None>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
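+  // Illustrative scalar view (a sketch, not part of the generated kernel):
+  // for each channel c and each output point (i, j) of the 2x2 tile,
+  //   out[i][j][c] = bias[c] + sum over 3x3 taps (ku, kv) of w[ku][kv][c] * in[2*i + ku][2*j + kv][c]
+  // with no activation applied afterwards.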
+  __asm __volatile(
+    "add x23, %[inptr0], %[input_row_stride]\n"
+    "add x19, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x22, %[outptr0], %[output_row_stride]\n"
+    "add x24, x23, %[input_row_stride]\n"
+    "add x20, x19, %[input_col_stride1]\n"
+    "and x27, %[n_channels], #3\n"
+    "add x25, x24, %[input_row_stride]\n"
+    "add x21, x20, %[input_col_stride1]\n"
+    "lsr x28, %[n_channels], #2\n"
+    "add x26, x25, %[input_row_stride]\n"
+    "cbz x28, 4f\n"
+    "1:\n"
+    "ldr q14, [%[wbptr]]\n"
+    "subs x28, x28, #1\n"
+    "mov v12.16b, v14.16b\n"
+    "ldr q8, [%[wbptr], #16]\n"
+    "mov v10.16b, v14.16b\n"
+    "ldr q7, [%[wbptr], #32]\n"
+    "mov v11.16b, v14.16b\n"
+    "ldr q6, [%[wbptr], #48]\n"
+    "mov v9.16b, v14.16b\n"
+    "ldr q5, [%[wbptr], #64]\n"
+    "ldr q4, [%[wbptr], #80]\n"
+    "ldr q3, [%[wbptr], #96]\n"
+    "ldr q2, [%[wbptr], #112]\n"
+    "ldr q1, [%[wbptr], #128]\n"
+    "ldr q0, [%[wbptr], #144]\n"
+    "ldr q15, [%[inptr0]]\n"
+    "fmla v12.4s, v15.4s, v8.4s\n"
+    "ldr q20, [x23]\n"
+    "ldr q13, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q17, [x24]\n"
+    "fmla v10.4s, v17.4s, v8.4s\n"
+    "ldr q16, [x23, %[input_col_stride1]]\n"
+    "fmla v12.4s, v20.4s, v5.4s\n"
+    "ldr q18, [%[inptr0], x19]\n"
+    "ldr q14, [x25]\n"
+    "ldr q15, [x24, %[input_col_stride1]]\n"
+    "fmla v12.4s, v13.4s, v7.4s\n"
+    "fmla v12.4s, v17.4s, v2.4s\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v11.4s, v18.4s, v8.4s\n"
+    "ldr q19, [x23, x19]\n"
+    "fmla v10.4s, v14.4s, v5.4s\n"
+    "ldr q20, [%[inptr0], x20]\n"
+    "fmla v12.4s, v15.4s, v1.4s\n"
+    "ldr q14, [x26]\n"
+    "fmla v11.4s, v19.4s, v5.4s\n"
+    "ldr q13, [x25, %[input_col_stride1]]\n"
+    "fmla v10.4s, v15.4s, v7.4s\n"
+    "ldr q17, [x24, x19]\n"
+    "fmla v12.4s, v19.4s, v3.4s\n"
+    "ldr q19, [x23, x20]\n"
+    "fmla v11.4s, v20.4s, v7.4s\n"
+    "ldr q18, [%[inptr0], x21]\n"
+    "fmla v10.4s, v14.4s, v2.4s\n"
+    "ldr q16, [x26, %[input_col_stride1]]\n"
+    "fmla v12.4s, v17.4s, v0.4s\n"
+    "ldr q14, [x25, x19]\n"
+    "fmla v11.4s, v17.4s, v2.4s\n"
+    "ldr q15, [x24, x20]\n"
+    "fmla v10.4s, v13.4s, v4.4s\n"
+    "ldr q13, [x23, x21]\n"
+    "str q12, [%[outptr0]]\n"
+    "fmla v9.4s, v17.4s, v8.4s\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr q12, [x26, x19]\n"
+    "fmla v10.4s, v17.4s, v6.4s\n"
+    "ldr q20, [x25, x20]\n"
+    "fmla v9.4s, v14.4s, v5.4s\n"
+    "ldr q17, [x24, x21]\n"
+    "fmla v11.4s, v18.4s, v6.4s\n"
+    "ldr q19, [x26, x20]\n"
+    "fmla v10.4s, v16.4s, v1.4s\n"
+    "ldr q18, [x25, x21]\n"
+    "fmla v9.4s, v15.4s, v7.4s\n"
+    "ldr q16, [x26, x21]\n"
+    "fmla v11.4s, v15.4s, v1.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v10.4s, v14.4s, v3.4s\n"
+    "ldr q14, [%[wbptr]]\n"
+    "fmla v9.4s, v12.4s, v2.4s\n"
+    "ldr q8, [%[wbptr], #16]\n"
+    "fmla v11.4s, v13.4s, v3.4s\n"
+    "ldr q7, [%[wbptr], #32]\n"
+    "fmla v10.4s, v12.4s, v0.4s\n"
+    "ldr q5, [%[wbptr], #64]\n"
+    "fmla v9.4s, v20.4s, v4.4s\n"
+    "ldr q2, [%[wbptr], #112]\n"
+    "fmla v11.4s, v17.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "str q10, [x22]\n"
+    "mov v12.16b, v14.16b\n"
+    "fmla v9.4s, v17.4s, v6.4s\n"
+    "ldr q4, [%[wbptr], #80]\n"
+    "str q11, [%[outptr0], %[output_col_stride1]]\n"
+    "mov v10.16b, v14.16b\n"
+    "mov v11.16b, v14.16b\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "ldr q6, [%[wbptr], #48]\n"
+    "ldr q15, [%[inptr0]]\n"
+    "add x23, x23, #16\n"
+    "fmla v12.4s, v15.4s, v8.4s\n"
+    "ldr q20, [x23]\n"
+    "fmla v9.4s, v18.4s, v3.4s\n"
+    "ldr q1, [%[wbptr], #128]\n"
+    "ldr q13, [%[inptr0], %[input_col_stride1]]\n"
+    "add x24, x24, #16\n"
+    "fmla v12.4s, v20.4s, v5.4s\n"
+    "ldr q17, [x24]\n"
+    "fmla v9.4s, v16.4s, v0.4s\n"
+    "ldr q3, [%[wbptr], #96]\n"
+    "fmla v10.4s, v17.4s, v8.4s\n"
+    "ldr q16, [x23, %[input_col_stride1]]\n"
+    "fmla v12.4s, v13.4s, v7.4s\n"
+    "ldr q18, [%[inptr0], x19]\n"
+    "str q9, [x22, %[output_col_stride1]]\n"
+    "add x25, x25, #16\n"
+    "mov v9.16b, v14.16b\n"
+    "ldr q0, [%[wbptr], #144]\n"
+    "fmla v12.4s, v17.4s, v2.4s\n"
+    "ldr q14, [x25]\n"
+    "ldr q15, [x24, %[input_col_stride1]]\n"
+    "add x26, x26, #16\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "add x22, x22, #16\n"
+    "subs x28, x28, #1\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v11.4s, v18.4s, v8.4s\n"
+    "ldr q19, [x23, x19]\n"
+    "fmla v10.4s, v14.4s, v5.4s\n"
+    "ldr q20, [%[inptr0], x20]\n"
+    "fmla v12.4s, v15.4s, v1.4s\n"
+    "ldr q14, [x26]\n"
+    "fmla v11.4s, v19.4s, v5.4s\n"
+    "ldr q13, [x25, %[input_col_stride1]]\n"
+    "fmla v10.4s, v15.4s, v7.4s\n"
+    "ldr q17, [x24, x19]\n"
+    "fmla v12.4s, v19.4s, v3.4s\n"
+    "ldr q19, [x23, x20]\n"
+    "fmla v11.4s, v20.4s, v7.4s\n"
+    "ldr q18, [%[inptr0], x21]\n"
+    "fmla v10.4s, v14.4s, v2.4s\n"
+    "ldr q16, [x26, %[input_col_stride1]]\n"
+    "fmla v12.4s, v17.4s, v0.4s\n"
+    "ldr q14, [x25, x19]\n"
+    "fmla v11.4s, v17.4s, v2.4s\n"
+    "ldr q15, [x24, x20]\n"
+    "fmla v10.4s, v13.4s, v4.4s\n"
+    "ldr q13, [x23, x21]\n"
+    "str q12, [%[outptr0]]\n"
+    "fmla v9.4s, v17.4s, v8.4s\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr q12, [x26, x19]\n"
+    "fmla v10.4s, v17.4s, v6.4s\n"
+    "ldr q20, [x25, x20]\n"
+    "fmla v9.4s, v14.4s, v5.4s\n"
+    "ldr q17, [x24, x21]\n"
+    "fmla v11.4s, v18.4s, v6.4s\n"
+    "ldr q19, [x26, x20]\n"
+    "fmla v10.4s, v16.4s, v1.4s\n"
+    "ldr q18, [x25, x21]\n"
+    "fmla v9.4s, v15.4s, v7.4s\n"
+    "ldr q16, [x26, x21]\n"
+    "fmla v11.4s, v15.4s, v1.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v10.4s, v14.4s, v3.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v9.4s, v12.4s, v2.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v11.4s, v13.4s, v3.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v10.4s, v12.4s, v0.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v9.4s, v20.4s, v4.4s\n"
+    "add x25, x25, #16\n"
+    "fmla v11.4s, v17.4s, v0.4s\n"
+    "add x26, x26, #16\n"
+    "str q10, [x22]\n"
+    "fmla v9.4s, v17.4s, v6.4s\n"
+    "str q11, [%[outptr0], %[output_col_stride1]]\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "fmla v9.4s, v18.4s, v3.4s\n"
+    "fmla v9.4s, v16.4s, v0.4s\n"
+    "str q9, [x22, %[output_col_stride1]]\n"
+    "add x22, x22, #16\n"
+    "4:\n"
+    "cbz x27, 7f\n"
+    "ldr s14, [%[wbptr]]\n"
+    "mov v12.16b, v14.16b\n"
+    "ldr s8, [%[wbptr], #4]\n"
+    "mov v10.16b, v14.16b\n"
+    "ldr s7, [%[wbptr], #8]\n"
+    "mov v11.16b, v14.16b\n"
+    "ldr s6, [%[wbptr], #12]\n"
+    "mov v9.16b, v14.16b\n"
+    "ldr s5, [%[wbptr], #16]\n"
+    "ldr s4, [%[wbptr], #20]\n"
+    "subs x27, x27, #1\n"
+    "ldr s3, [%[wbptr], #24]\n"
+    "ldr s2, [%[wbptr], #28]\n"
+    "ldr s1, [%[wbptr], #32]\n"
+    "ldr s0, [%[wbptr], #36]\n"
+    "ldr s15, [%[inptr0]]\n"
+    "ldr s20, [x23]\n"
+    "fmla v12.4s, v15.4s, v8.4s\n"
+    "ldr s13, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr s17, [x24]\n"
+    "ldr s16, [x23, %[input_col_stride1]]\n"
+    "fmla v10.4s, v17.4s, v8.4s\n"
+    "ldr s18, [%[inptr0], x19]\n"
+    "fmla v12.4s, v20.4s, v5.4s\n"
+    "ldr s14, [x25]\n"
+    "ldr s15, [x24, %[input_col_stride1]]\n"
+    "fmla v12.4s, v13.4s, v7.4s\n"
+    "fmla v12.4s, v17.4s, v2.4s\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v11.4s, v18.4s, v8.4s\n"
+    "ldr s19, [x23, x19]\n"
+    "fmla v10.4s, v14.4s, v5.4s\n"
+    "ldr s20, [%[inptr0], x20]\n"
+    "fmla v12.4s, v15.4s, v1.4s\n"
+    "ldr s14, [x26]\n"
+    "fmla v11.4s, v19.4s, v5.4s\n"
+    "ldr s13, [x25, %[input_col_stride1]]\n"
+    "fmla v10.4s, v15.4s, v7.4s\n"
+    "ldr s17, [x24, x19]\n"
+    "fmla v12.4s, v19.4s, v3.4s\n"
+    "ldr s19, [x23, x20]\n"
+    "fmla v11.4s, v20.4s, v7.4s\n"
+    "ldr s18, [%[inptr0], x21]\n"
+    "fmla v10.4s, v14.4s, v2.4s\n"
+    "ldr s16, [x26, %[input_col_stride1]]\n"
+    "fmla v12.4s, v17.4s, v0.4s\n"
+    "ldr s14, [x25, x19]\n"
+    "fmla v11.4s, v17.4s, v2.4s\n"
+    "ldr s15, [x24, x20]\n"
+    "fmla v10.4s, v13.4s, v4.4s\n"
+    "ldr s13, [x23, x21]\n"
+    "str s12, [%[outptr0]]\n"
+    "fmla v9.4s, v17.4s, v8.4s\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr s12, [x26, x19]\n"
+    "fmla v10.4s, v17.4s, v6.4s\n"
+    "ldr s20, [x25, x20]\n"
+    "fmla v9.4s, v14.4s, v5.4s\n"
+    "ldr s17, [x24, x21]\n"
+    "fmla v11.4s, v18.4s, v6.4s\n"
+    "ldr s19, [x26, x20]\n"
+    "fmla v10.4s, v16.4s, v1.4s\n"
+    "ldr s18, [x25, x21]\n"
+    "fmla v9.4s, v15.4s, v7.4s\n"
+    "ldr s16, [x26, x21]\n"
+    "fmla v11.4s, v15.4s, v1.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v10.4s, v14.4s, v3.4s\n"
+    "ldr s14, [%[wbptr]]\n"
+    "fmla v9.4s, v12.4s, v2.4s\n"
+    "ldr s8, [%[wbptr], #4]\n"
+    "fmla v11.4s, v13.4s, v3.4s\n"
+    "ldr s7, [%[wbptr], #8]\n"
+    "fmla v10.4s, v12.4s, v0.4s\n"
+    "ldr s5, [%[wbptr], #16]\n"
+    "fmla v9.4s, v20.4s, v4.4s\n"
+    "ldr s2, [%[wbptr], #28]\n"
+    "fmla v11.4s, v17.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "str s10, [x22]\n"
+    "mov v12.16b, v14.16b\n"
+    "fmla v9.4s, v17.4s, v6.4s\n"
+    "ldr s4, [%[wbptr], #20]\n"
+    "str s11, [%[outptr0], %[output_col_stride1]]\n"
+    "mov v10.16b, v14.16b\n"
+    "mov v11.16b, v14.16b\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "ldr s6, [%[wbptr], #12]\n"
+    "ldr s15, [%[inptr0]]\n"
+    "add x23, x23, #4\n"
+    "fmla v12.4s, v15.4s, v8.4s\n"
+    "ldr s20, [x23]\n"
+    "fmla v9.4s, v18.4s, v3.4s\n"
+    "ldr s1, [%[wbptr], #32]\n"
+    "ldr s13, [%[inptr0], %[input_col_stride1]]\n"
+    "add x24, x24, #4\n"
+    "fmla v12.4s, v20.4s, v5.4s\n"
+    "ldr s17, [x24]\n"
+    "fmla v9.4s, v16.4s, v0.4s\n"
+    "ldr s3, [%[wbptr], #24]\n"
+    "fmla v10.4s, v17.4s, v8.4s\n"
+    "ldr s16, [x23, %[input_col_stride1]]\n"
+    "fmla v12.4s, v13.4s, v7.4s\n"
+    "ldr s18, [%[inptr0], x19]\n"
+    "str s9, [x22, %[output_col_stride1]]\n"
+    "add x25, x25, #4\n"
+    "mov v9.16b, v14.16b\n"
+    "ldr s0, [%[wbptr], #36]\n"
+    "fmla v12.4s, v17.4s, v2.4s\n"
+    "ldr s14, [x25]\n"
+    "ldr s15, [x24, %[input_col_stride1]]\n"
+    "add x26, x26, #4\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "add x22, x22, #4\n"
+    "subs x27, x27, #1\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v11.4s, v18.4s, v8.4s\n"
+    "ldr s19, [x23, x19]\n"
+    "fmla v10.4s, v14.4s, v5.4s\n"
+    "ldr s20, [%[inptr0], x20]\n"
+    "fmla v12.4s, v15.4s, v1.4s\n"
+    "ldr s14, [x26]\n"
+    "fmla v11.4s, v19.4s, v5.4s\n"
+    "ldr s13, [x25, %[input_col_stride1]]\n"
+    "fmla v10.4s, v15.4s, v7.4s\n"
+    "ldr s17, [x24, x19]\n"
+    "fmla v12.4s, v19.4s, v3.4s\n"
+    "ldr s19, [x23, x20]\n"
+    "fmla v11.4s, v20.4s, v7.4s\n"
+    "ldr s18, [%[inptr0], x21]\n"
+    "fmla v10.4s, v14.4s, v2.4s\n"
+    "ldr s16, [x26, %[input_col_stride1]]\n"
+    "fmla v12.4s, v17.4s, v0.4s\n"
+    "ldr s14, [x25, x19]\n"
+    "fmla v11.4s, v17.4s, v2.4s\n"
+    "ldr s15, [x24, x20]\n"
+    "fmla v10.4s, v13.4s, v4.4s\n"
+    "ldr s13, [x23, x21]\n"
+    "str s12, [%[outptr0]]\n"
+    "fmla v9.4s, v17.4s, v8.4s\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr s12, [x26, x19]\n"
+    "fmla v10.4s, v17.4s, v6.4s\n"
+    "ldr s20, [x25, x20]\n"
+    "fmla v9.4s, v14.4s, v5.4s\n"
+    "ldr s17, [x24, x21]\n"
+    "fmla v11.4s, v18.4s, v6.4s\n"
+    "ldr s19, [x26, x20]\n"
+    "fmla v10.4s, v16.4s, v1.4s\n"
+    "ldr s18, [x25, x21]\n"
+    "fmla v9.4s, v15.4s, v7.4s\n"
+    "ldr s16, [x26, x21]\n"
+    "fmla v11.4s, v15.4s, v1.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v10.4s, v14.4s, v3.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v9.4s, v12.4s, v2.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v11.4s, v13.4s, v3.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v10.4s, v12.4s, v0.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v9.4s, v20.4s, v4.4s\n"
+    "add x25, x25, #4\n"
+    "fmla v11.4s, v17.4s, v0.4s\n"
+    "add x26, x26, #4\n"
+    "str s10, [x22]\n"
+    "fmla v9.4s, v17.4s, v6.4s\n"
+    "str s11, [%[outptr0], %[output_col_stride1]]\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "fmla v9.4s, v18.4s, v3.4s\n"
+    "fmla v9.4s, v16.4s, v0.4s\n"
+    "str s9, [x22, %[output_col_stride1]]\n"
+    "add x22, x22, #4\n"
+    "7:\n"
+    : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr)
+    : [n_channels] "r" ((long) n_channels), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
 template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
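+  // Four channels are processed per iteration of the main loop (labels 1-3,
+  // 128-bit q registers); the n_channels % 4 remainder falls through to a
+  // scalar tail (labels 4-6) that repeats the same schedule on s registers.
+  // Accumulators are seeded with the bias from wbptr, and the ReLU is fused
+  // by clamping each result against a zeroed register (movi/fmax) before
+  // the store.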
+  __asm __volatile(
+    "add x24, %[inptr0], %[input_row_stride]\n"
+    "add x27, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x19, %[outptr0], %[output_row_stride]\n"
+    "add x25, x24, %[input_row_stride]\n"
+    "add x23, x27, %[input_col_stride1]\n"
+    "and x20, %[n_channels], #3\n"
+    "add x28, x25, %[input_row_stride]\n"
+    "add x22, x23, %[input_col_stride1]\n"
+    "lsr x21, %[n_channels], #2\n"
+    "add x26, x28, %[input_row_stride]\n"
+    "cbz x21, 4f\n"
+    "1:\n"
+    "ldr q16, [%[wbptr]]\n"
+    "subs x21, x21, #1\n"
+    "mov v3.16b, v16.16b\n"
+    "ldr q4, [%[wbptr], #16]\n"
+    "mov v1.16b, v16.16b\n"
+    "ldr q5, [%[wbptr], #32]\n"
+    "mov v2.16b, v16.16b\n"
+    "ldr q12, [%[wbptr], #48]\n"
+    "mov v0.16b, v16.16b\n"
+    "ldr q11, [%[wbptr], #64]\n"
+    "ldr q10, [%[wbptr], #80]\n"
+    "ldr q6, [%[wbptr], #96]\n"
+    "ldr q9, [%[wbptr], #112]\n"
+    "ldr q8, [%[wbptr], #128]\n"
+    "ldr q7, [%[wbptr], #144]\n"
+    "ldr q21, [%[inptr0]]\n"
+    "fmla v3.4s, v21.4s, v4.4s\n"
+    "ldr q23, [x24]\n"
+    "ldr q19, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q14, [x25]\n"
+    "fmla v1.4s, v14.4s, v4.4s\n"
+    "ldr q13, [x24, %[input_col_stride1]]\n"
+    "fmla v3.4s, v23.4s, v11.4s\n"
+    "ldr q18, [%[inptr0], x27]\n"
+    "ldr q15, [x28]\n"
+    "ldr q22, [x25, %[input_col_stride1]]\n"
+    "fmla v3.4s, v19.4s, v5.4s\n"
+    "fmla v3.4s, v14.4s, v9.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v3.4s, v13.4s, v10.4s\n"
+    "ldr q17, [x24, x27]\n"
+    "fmla v2.4s, v18.4s, v4.4s\n"
+    "ldr q20, [%[inptr0], x23]\n"
+    "fmla v1.4s, v15.4s, v11.4s\n"
+    "ldr q19, [x26]\n"
+    "fmla v3.4s, v18.4s, v12.4s\n"
+    "ldr q13, [x28, %[input_col_stride1]]\n"
+    "fmla v2.4s, v17.4s, v11.4s\n"
+    "ldr q14, [x25, x27]\n"
+    "fmla v1.4s, v22.4s, v5.4s\n"
+    "ldr q15, [x24, x23]\n"
+    "fmla v3.4s, v22.4s, v8.4s\n"
+    "ldr q16, [%[inptr0], x22]\n"
+    "fmla v2.4s, v20.4s, v5.4s\n"
+    "ldr q20, [x26, %[input_col_stride1]]\n"
+    "fmla v1.4s, v19.4s, v9.4s\n"
+    "ldr q19, [x28, x27]\n"
+    "fmla v3.4s, v17.4s, v6.4s\n"
+    "ldr q21, [x25, x23]\n"
+    "fmla v2.4s, v14.4s, v9.4s\n"
+    "ldr q22, [x24, x22]\n"
+    "fmla v1.4s, v13.4s, v10.4s\n"
+    "ldr q23, [x26, x27]\n"
+    "fmla v3.4s, v14.4s, v7.4s\n"
+    "ldr q18, [x28, x23]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "ldr q13, [x25, x22]\n"
+    "fmla v1.4s, v14.4s, v12.4s\n"
+    "ldr q14, [x26, x23]\n"
+    "fmla v2.4s, v15.4s, v10.4s\n"
+    "ldr q17, [x28, x22]\n"
+    "fmla v0.4s, v19.4s, v11.4s\n"
+    "ldr q15, [x26, x22]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v2.4s, v16.4s, v12.4s\n"
+    "ldr q16, [%[wbptr]]\n"
+    "fmla v0.4s, v21.4s, v5.4s\n"
+    "ldr q4, [%[wbptr], #16]\n"
+    "fmla v1.4s, v19.4s, v6.4s\n"
+    "ldr q11, [%[wbptr], #64]\n"
+    "fmla v2.4s, v21.4s, v8.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v23.4s, v9.4s\n"
+    "ldr q5, [%[wbptr], #32]\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v2.4s, v22.4s, v6.4s\n"
+    "ldr q21, [%[inptr0]]\n"
+    "fmla v0.4s, v18.4s, v10.4s\n"
+    "ldr q9, [%[wbptr], #112]\n"
+    "movi v20.16b, #0\n"
+    "ldr q19, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v13.4s, v7.4s\n"
+    "ldr q18, [%[inptr0], x27]\n"
+    "fmla v0.4s, v13.4s, v12.4s\n"
+    "ldr q10, [%[wbptr], #80]\n"
+    "fmax v3.4s, v3.4s, v20.4s\n"
+    "add x24, x24, #16\n"
+    "fmax v2.4s, v2.4s, v20.4s\n"
+    "ldr q23, [x24]\n"
+    "str q3, [%[outptr0]]\n"
+    "fmla v0.4s, v14.4s, v8.4s\n"
+    "str q2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v1.4s, v1.4s, v20.4s\n"
+    "mov v3.16b, v16.16b\n"
+    "ldr q12, [%[wbptr], #48]\n"
+    "str q1, [x19]\n"
+    "fmla v0.4s, v17.4s, v6.4s\n"
+    "mov v1.16b, v16.16b\n"
+    "ldr q8, [%[wbptr], #128]\n"
+    "mov v2.16b, v16.16b\n"
+    "ldr q13, [x24, %[input_col_stride1]]\n"
+    "fmla v0.4s, v15.4s, v7.4s\n"
+    "ldr q6, [%[wbptr], #96]\n"
+    "fmla v3.4s, v21.4s, v4.4s\n"
+    "add x25, x25, #16\n"
+    "ldr q14, [x25]\n"
+    "add x28, x28, #16\n"
+    "fmax v0.4s, v0.4s, v20.4s\n"
+    "ldr q7, [%[wbptr], #144]\n"
+    "fmla v3.4s, v23.4s, v11.4s\n"
+    "ldr q15, [x28]\n"
+    "str q0, [x19, %[output_col_stride1]]\n"
+    "fmla v1.4s, v14.4s, v4.4s\n"
+    "mov v0.16b, v16.16b\n"
+    "ldr q22, [x25, %[input_col_stride1]]\n"
+    "fmla v3.4s, v19.4s, v5.4s\n"
+    "add x26, x26, #16\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "add x19, x19, #16\n"
+    "subs x21, x21, #1\n"
+    "fmla v3.4s, v14.4s, v9.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v3.4s, v13.4s, v10.4s\n"
+    "ldr q17, [x24, x27]\n"
+    "fmla v2.4s, v18.4s, v4.4s\n"
+    "ldr q20, [%[inptr0], x23]\n"
+    "fmla v1.4s, v15.4s, v11.4s\n"
+    "ldr q19, [x26]\n"
+    "fmla v3.4s, v18.4s, v12.4s\n"
+    "ldr q13, [x28, %[input_col_stride1]]\n"
+    "fmla v2.4s, v17.4s, v11.4s\n"
+    "ldr q14, [x25, x27]\n"
+    "fmla v1.4s, v22.4s, v5.4s\n"
+    "ldr q15, [x24, x23]\n"
+    "fmla v3.4s, v22.4s, v8.4s\n"
+    "ldr q16, [%[inptr0], x22]\n"
+    "fmla v2.4s, v20.4s, v5.4s\n"
+    "ldr q20, [x26, %[input_col_stride1]]\n"
+    "fmla v1.4s, v19.4s, v9.4s\n"
+    "ldr q19, [x28, x27]\n"
+    "fmla v3.4s, v17.4s, v6.4s\n"
+    "ldr q21, [x25, x23]\n"
+    "fmla v2.4s, v14.4s, v9.4s\n"
+    "ldr q22, [x24, x22]\n"
+    "fmla v1.4s, v13.4s, v10.4s\n"
+    "ldr q23, [x26, x27]\n"
+    "fmla v3.4s, v14.4s, v7.4s\n"
+    "ldr q18, [x28, x23]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "ldr q13, [x25, x22]\n"
+    "fmla v1.4s, v14.4s, v12.4s\n"
+    "ldr q14, [x26, x23]\n"
+    "fmla v2.4s, v15.4s, v10.4s\n"
+    "ldr q17, [x28, x22]\n"
+    "fmla v0.4s, v19.4s, v11.4s\n"
+    "ldr q15, [x26, x22]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v2.4s, v16.4s, v12.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v21.4s, v5.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v1.4s, v19.4s, v6.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v2.4s, v21.4s, v8.4s\n"
+    "add x25, x25, #16\n"
+    "fmla v0.4s, v23.4s, v9.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v2.4s, v22.4s, v6.4s\n"
+    "movi v20.16b, #0\n"
+    "fmla v0.4s, v18.4s, v10.4s\n"
+    "fmax v3.4s, v3.4s, v20.4s\n"
+    "fmla v2.4s, v13.4s, v7.4s\n"
+    "fmax v1.4s, v1.4s, v20.4s\n"
+    "str q3, [%[outptr0]]\n"
+    "fmla v0.4s, v13.4s, v12.4s\n"
+    "str q1, [x19]\n"
+    "fmax v2.4s, v2.4s, v20.4s\n"
+    "fmla v0.4s, v14.4s, v8.4s\n"
+    "str q2, [%[outptr0], %[output_col_stride1]]\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v0.4s, v17.4s, v6.4s\n"
+    "fmla v0.4s, v15.4s, v7.4s\n"
+    "fmax v0.4s, v0.4s, v20.4s\n"
+    "str q0, [x19, %[output_col_stride1]]\n"
+    "add x19, x19, #16\n"
+    "4:\n"
+    "cbz x20, 7f\n"
+    "ldr s16, [%[wbptr]]\n"
+    "mov v3.16b, v16.16b\n"
+    "ldr s4, [%[wbptr], #4]\n"
+    "mov v1.16b, v16.16b\n"
+    "ldr s5, [%[wbptr], #8]\n"
+    "mov v2.16b, v16.16b\n"
+    "ldr s12, [%[wbptr], #12]\n"
+    "mov v0.16b, v16.16b\n"
+    "ldr s11, [%[wbptr], #16]\n"
+    "ldr s10, [%[wbptr], #20]\n"
+    "subs x20, x20, #1\n"
+    "ldr s6, [%[wbptr], #24]\n"
+    "ldr s9, [%[wbptr], #28]\n"
+    "ldr s8, [%[wbptr], #32]\n"
+    "ldr s7, [%[wbptr], #36]\n"
+    "ldr s21, [%[inptr0]]\n"
+    "ldr s23, [x24]\n"
+    "fmla v3.4s, v21.4s, v4.4s\n"
+    "ldr s19, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr s14, [x25]\n"
+    "ldr s13, [x24, %[input_col_stride1]]\n"
+    "fmla v1.4s, v14.4s, v4.4s\n"
+    "ldr s18, [%[inptr0], x27]\n"
+    "fmla v3.4s, v23.4s, v11.4s\n"
+    "ldr s15, [x28]\n"
+    "ldr s22, [x25, %[input_col_stride1]]\n"
+    "fmla v3.4s, v19.4s, v5.4s\n"
+    "fmla v3.4s, v14.4s, v9.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v3.4s, v13.4s, v10.4s\n"
+    "ldr s17, [x24, x27]\n"
+    "fmla v2.4s, v18.4s, v4.4s\n"
+    "ldr s20, [%[inptr0], x23]\n"
+    "fmla v1.4s, v15.4s, v11.4s\n"
+    "ldr s19, [x26]\n"
+    "fmla v3.4s, v18.4s, v12.4s\n"
+    "ldr s13, [x28, %[input_col_stride1]]\n"
+    "fmla v2.4s, v17.4s, v11.4s\n"
+    "ldr s14, [x25, x27]\n"
+    "fmla v1.4s, v22.4s, v5.4s\n"
+    "ldr s15, [x24, x23]\n"
+    "fmla v3.4s, v22.4s, v8.4s\n"
+    "ldr s16, [%[inptr0], x22]\n"
+    "fmla v2.4s, v20.4s, v5.4s\n"
+    "ldr s20, [x26, %[input_col_stride1]]\n"
+    "fmla v1.4s, v19.4s, v9.4s\n"
+    "ldr s19, [x28, x27]\n"
+    "fmla v3.4s, v17.4s, v6.4s\n"
+    "ldr s21, [x25, x23]\n"
+    "fmla v2.4s, v14.4s, v9.4s\n"
+    "ldr s22, [x24, x22]\n"
+    "fmla v1.4s, v13.4s, v10.4s\n"
+    "ldr s23, [x26, x27]\n"
+    "fmla v3.4s, v14.4s, v7.4s\n"
+    "ldr s18, [x28, x23]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "ldr s13, [x25, x22]\n"
+    "fmla v1.4s, v14.4s, v12.4s\n"
+    "ldr s14, [x26, x23]\n"
+    "fmla v2.4s, v15.4s, v10.4s\n"
+    "ldr s17, [x28, x22]\n"
+    "fmla v0.4s, v19.4s, v11.4s\n"
+    "ldr s15, [x26, x22]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v2.4s, v16.4s, v12.4s\n"
+    "ldr s16, [%[wbptr]]\n"
+    "fmla v0.4s, v21.4s, v5.4s\n"
+    "ldr s4, [%[wbptr], #4]\n"
+    "fmla v1.4s, v19.4s, v6.4s\n"
+    "ldr s11, [%[wbptr], #16]\n"
+    "fmla v2.4s, v21.4s, v8.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v23.4s, v9.4s\n"
+    "ldr s5, [%[wbptr], #8]\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v2.4s, v22.4s, v6.4s\n"
+    "ldr s21, [%[inptr0]]\n"
+    "fmla v0.4s, v18.4s, v10.4s\n"
+    "ldr s9, [%[wbptr], #28]\n"
+    "movi v20.16b, #0\n"
+    "ldr s19, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v13.4s, v7.4s\n"
+    "ldr s18, [%[inptr0], x27]\n"
+    "fmla v0.4s, v13.4s, v12.4s\n"
+    "ldr s10, [%[wbptr], #20]\n"
+    "fmax v3.4s, v3.4s, v20.4s\n"
+    "add x24, x24, #4\n"
+    "fmax v2.4s, v2.4s, v20.4s\n"
+    "ldr s23, [x24]\n"
+    "str s3, [%[outptr0]]\n"
+    "fmla v0.4s, v14.4s, v8.4s\n"
+    "str s2, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v1.4s, v1.4s, v20.4s\n"
+    "mov v3.16b, v16.16b\n"
+    "ldr s12, [%[wbptr], #12]\n"
+    "str s1, [x19]\n"
+    "fmla v0.4s, v17.4s, v6.4s\n"
+    "mov v1.16b, v16.16b\n"
+    "ldr s8, [%[wbptr], #32]\n"
+    "mov v2.16b, v16.16b\n"
+    "ldr s13, [x24, %[input_col_stride1]]\n"
+    "fmla v0.4s, v15.4s, v7.4s\n"
+    "ldr s6, [%[wbptr], #24]\n"
+    "fmla v3.4s, v21.4s, v4.4s\n"
+    "add x25, x25, #4\n"
+    "ldr s14, [x25]\n"
+    "add x28, x28, #4\n"
+    "fmax v0.4s, v0.4s, v20.4s\n"
+    "ldr s7, [%[wbptr], #36]\n"
+    "fmla v3.4s, v23.4s, v11.4s\n"
+    "ldr s15, [x28]\n"
+    "str s0, [x19, %[output_col_stride1]]\n"
+    "fmla v1.4s, v14.4s, v4.4s\n"
+    "mov v0.16b, v16.16b\n"
+    "ldr s22, [x25, %[input_col_stride1]]\n"
+    "fmla v3.4s, v19.4s, v5.4s\n"
+    "add x26, x26, #4\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "add x19, x19, #4\n"
+    "subs x20, x20, #1\n"
+    "fmla v3.4s, v14.4s, v9.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v3.4s, v13.4s, v10.4s\n"
+    "ldr s17, [x24, x27]\n"
+    "fmla v2.4s, v18.4s, v4.4s\n"
+    "ldr s20, [%[inptr0], x23]\n"
+    "fmla v1.4s, v15.4s, v11.4s\n"
+    "ldr s19, [x26]\n"
+    "fmla v3.4s, v18.4s, v12.4s\n"
+    "ldr s13, [x28, %[input_col_stride1]]\n"
+    "fmla v2.4s, v17.4s, v11.4s\n"
+    "ldr s14, [x25, x27]\n"
+    "fmla v1.4s, v22.4s, v5.4s\n"
+    "ldr s15, [x24, x23]\n"
+    "fmla v3.4s, v22.4s, v8.4s\n"
+    "ldr s16, [%[inptr0], x22]\n"
+    "fmla v2.4s, v20.4s, v5.4s\n"
+    "ldr s20, [x26, %[input_col_stride1]]\n"
+    "fmla v1.4s, v19.4s, v9.4s\n"
+    "ldr s19, [x28, x27]\n"
+    "fmla v3.4s, v17.4s, v6.4s\n"
+    "ldr s21, [x25, x23]\n"
+    "fmla v2.4s, v14.4s, v9.4s\n"
+    "ldr s22, [x24, x22]\n"
+    "fmla v1.4s, v13.4s, v10.4s\n"
+    "ldr s23, [x26, x27]\n"
+    "fmla v3.4s, v14.4s, v7.4s\n"
+    "ldr s18, [x28, x23]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "ldr s13, [x25, x22]\n"
+    "fmla v1.4s, v14.4s, v12.4s\n"
+    "ldr s14, [x26, x23]\n"
+    "fmla v2.4s, v15.4s, v10.4s\n"
+    "ldr s17, [x28, x22]\n"
+    "fmla v0.4s, v19.4s, v11.4s\n"
+    "ldr s15, [x26, x22]\n"
+    "fmla v1.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v2.4s, v16.4s, v12.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v0.4s, v21.4s, v5.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v1.4s, v19.4s, v6.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v2.4s, v21.4s, v8.4s\n"
+    "add x25, x25, #4\n"
+    "fmla v0.4s, v23.4s, v9.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v2.4s, v22.4s, v6.4s\n"
+    "movi v20.16b, #0\n"
+    "fmla v0.4s, v18.4s, v10.4s\n"
+    "fmax v3.4s, v3.4s, v20.4s\n"
+    "fmla v2.4s, v13.4s, v7.4s\n"
+    "fmax v1.4s, v1.4s, v20.4s\n"
+    "str s3, [%[outptr0]]\n"
+    "fmla v0.4s, v13.4s, v12.4s\n"
+    "str s1, [x19]\n"
+    "fmax v2.4s, v2.4s, v20.4s\n"
+    "fmla v0.4s, v14.4s, v8.4s\n"
+    "str s2, [%[outptr0], %[output_col_stride1]]\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v0.4s, v17.4s, v6.4s\n"
+    "fmla v0.4s, v15.4s, v7.4s\n"
+    "fmax v0.4s, v0.4s, v20.4s\n"
+    "str s0, [x19, %[output_col_stride1]]\n"
+    "add x19, x19, #4\n"
+    "7:\n"
+    : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input)
+    : [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
 template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-  ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-  ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
 template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-  ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-  ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
+void Conv::execute_tile<ActivationFunction::ReLU6>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
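+  // Same loop structure as the ReLU variant above; the fused activation here
+  // clamps each accumulator to [0, 6] with fmax against a zeroed register and
+  // fmin against 6.0f (fmov v22.4s, #6.0) before the stores.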
+  __asm __volatile(
+    "add x21, %[inptr0], %[input_row_stride]\n"
+    "add x23, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x24, %[outptr0], %[output_row_stride]\n"
+    "add x28, x21, %[input_row_stride]\n"
+    "add x26, x23, %[input_col_stride1]\n"
+    "and x19, %[n_channels], #3\n"
+    "add x27, x28, %[input_row_stride]\n"
+    "add x25, x26, %[input_col_stride1]\n"
+    "lsr x20, %[n_channels], #2\n"
+    "add x22, x27, %[input_row_stride]\n"
+    "cbz x20, 4f\n"
+    "1:\n"
+    "ldr q14, [%[wbptr]]\n"
+    "subs x20, x20, #1\n"
+    "mov v5.16b, v14.16b\n"
+    "ldr q0, [%[wbptr], #16]\n"
+    "mov v11.16b, v14.16b\n"
+    "ldr q1, [%[wbptr], #32]\n"
+    "mov v12.16b, v14.16b\n"
+    "ldr q2, [%[wbptr], #48]\n"
+    "mov v10.16b, v14.16b\n"
+    "ldr q6, [%[wbptr], #64]\n"
+    "ldr q3, [%[wbptr], #80]\n"
+    "ldr q7, [%[wbptr], #96]\n"
+    "ldr q4, [%[wbptr], #112]\n"
+    "ldr q8, [%[wbptr], #128]\n"
+    "ldr q9, [%[wbptr], #144]\n"
+    "ldr q19, [%[inptr0]]\n"
+    "fmla v5.4s, v19.4s, v0.4s\n"
+    "ldr q15, [x21]\n"
+    "ldr q21, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q16, [x28]\n"
+    "fmla v11.4s, v16.4s, v0.4s\n"
+    "ldr q23, [x21, %[input_col_stride1]]\n"
+    "fmla v5.4s, v15.4s, v6.4s\n"
+    "ldr q18, [%[inptr0], x23]\n"
+    "ldr q17, [x27]\n"
+    "ldr q13, [x28, %[input_col_stride1]]\n"
+    "fmla v5.4s, v21.4s, v1.4s\n"
+    "fmla v5.4s, v16.4s, v4.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v5.4s, v23.4s, v3.4s\n"
+    "ldr q21, [x21, x23]\n"
+    "fmla v12.4s, v18.4s, v0.4s\n"
+    "ldr q20, [%[inptr0], x26]\n"
+    "fmla v11.4s, v17.4s, v6.4s\n"
+    "ldr q19, [x22]\n"
+    "fmla v5.4s, v18.4s, v2.4s\n"
+    "ldr q15, [x27, %[input_col_stride1]]\n"
+    "fmla v12.4s, v21.4s, v6.4s\n"
+    "ldr q16, [x28, x23]\n"
+    "fmla v11.4s, v13.4s, v1.4s\n"
+    "ldr q17, [x21, x26]\n"
+    "fmla v5.4s, v13.4s, v8.4s\n"
+    "ldr q14, [%[inptr0], x25]\n"
+    "fmla v12.4s, v20.4s, v1.4s\n"
+    "ldr q20, [x22, %[input_col_stride1]]\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr q19, [x27, x23]\n"
+    "fmla v5.4s, v21.4s, v7.4s\n"
+    "ldr q22, [x28, x26]\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "ldr q21, [x21, x25]\n"
+    "fmla v11.4s, v15.4s, v3.4s\n"
+    "ldr q23, [x22, x23]\n"
+    "fmla v5.4s, v16.4s, v9.4s\n"
+    "ldr q18, [x27, x26]\n"
+    "fmla v10.4s, v16.4s, v0.4s\n"
+    "ldr q15, [x28, x25]\n"
+    "fmla v11.4s, v16.4s, v2.4s\n"
+    "ldr q16, [x22, x26]\n"
+    "fmla v12.4s, v17.4s, v3.4s\n"
+    "ldr q17, [x27, x25]\n"
+    "fmla v10.4s, v19.4s, v6.4s\n"
+    "ldr q13, [x22, x25]\n"
+    "fmla v11.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v12.4s, v14.4s, v2.4s\n"
+    "ldr q14, [%[wbptr]]\n"
+    "fmla v10.4s, v22.4s, v1.4s\n"
+    "ldr q0, [%[wbptr], #16]\n"
+    "fmla v11.4s, v19.4s, v7.4s\n"
+    "ldr q6, [%[wbptr], #64]\n"
+    "fmla v12.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v10.4s, v23.4s, v4.4s\n"
+    "ldr q1, [%[wbptr], #32]\n"
+    "fmla v11.4s, v23.4s, v9.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v12.4s, v21.4s, v7.4s\n"
+    "ldr q19, [%[inptr0]]\n"
+    "fmla v10.4s, v18.4s, v3.4s\n"
+    "ldr q4, [%[wbptr], #112]\n"
+    "movi v20.16b, #0\n"
+    "ldr q21, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v12.4s, v15.4s, v9.4s\n"
+    "ldr q18, [%[inptr0], x23]\n"
+    "fmla v10.4s, v15.4s, v2.4s\n"
+    "ldr q3, [%[wbptr], #80]\n"
+    "fmov v22.4s, #6.0\n"
+    "add x21, x21, #16\n"
+    "fmax v5.4s, v5.4s, v20.4s\n"
+    "ldr q15, [x21]\n"
+    "fmla v10.4s, v16.4s, v8.4s\n"
+    "ldr q2, [%[wbptr], #48]\n"
+    "fmin v5.4s, v5.4s, v22.4s\n"
+    "ldr q23, [x21, %[input_col_stride1]]\n"
+    "fmax v12.4s, v12.4s, v20.4s\n"
+    "add x28, x28, #16\n"
+    "str q5, [%[outptr0]]\n"
+    "fmla v10.4s, v17.4s, v7.4s\n"
+    "fmin v12.4s, v12.4s, v22.4s\n"
+    "ldr q8, [%[wbptr], #128]\n"
+    "fmax v11.4s, v11.4s, v20.4s\n"
+    "ldr q16, [x28]\n"
+    "str q12, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v10.4s, v13.4s, v9.4s\n"
+    "fmin v11.4s, v11.4s, v22.4s\n"
+    "ldr q7, [%[wbptr], #96]\n"
+    "mov v5.16b, v14.16b\n"
+    "ldr q13, [x28, %[input_col_stride1]]\n"
+    "str q11, [x24]\n"
+    "fmax v10.4s, v10.4s, v20.4s\n"
+    "mov v11.16b, v14.16b\n"
+    "ldr q9, [%[wbptr], #144]\n"
+    "fmin v10.4s, v10.4s, v22.4s\n"
+    "add x27, x27, #16\n"
+    "mov v12.16b, v14.16b\n"
+    "ldr q17, [x27]\n"
+    "str q10, [x24, %[output_col_stride1]]\n"
+    "fmla v5.4s, v19.4s, v0.4s\n"
+    "mov v10.16b, v14.16b\n"
+    "add x22, x22, #16\n"
+    "fmla v11.4s, v16.4s, v0.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v5.4s, v15.4s, v6.4s\n"
+    "add x24, x24, #16\n"
+    "subs x20, x20, #1\n"
+    "fmla v5.4s, v21.4s, v1.4s\n"
+    "fmla v5.4s, v16.4s, v4.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v5.4s, v23.4s, v3.4s\n"
+    "ldr q21, [x21, x23]\n"
+    "fmla v12.4s, v18.4s, v0.4s\n"
+    "ldr q20, [%[inptr0], x26]\n"
+    "fmla v11.4s, v17.4s, v6.4s\n"
+    "ldr q19, [x22]\n"
+    "fmla v5.4s, v18.4s, v2.4s\n"
+    "ldr q15, [x27, %[input_col_stride1]]\n"
+    "fmla v12.4s, v21.4s, v6.4s\n"
+    "ldr q16, [x28, x23]\n"
+    "fmla v11.4s, v13.4s, v1.4s\n"
+    "ldr q17, [x21, x26]\n"
+    "fmla v5.4s, v13.4s, v8.4s\n"
+    "ldr q14, [%[inptr0], x25]\n"
+    "fmla v12.4s, v20.4s, v1.4s\n"
+    "ldr q20, [x22, %[input_col_stride1]]\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr q19, [x27, x23]\n"
+    "fmla v5.4s, v21.4s, v7.4s\n"
+    "ldr q22, [x28, x26]\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "ldr q21, [x21, x25]\n"
+    "fmla v11.4s, v15.4s, v3.4s\n"
+    "ldr q23, [x22, x23]\n"
+    "fmla v5.4s, v16.4s, v9.4s\n"
+    "ldr q18, [x27, x26]\n"
+    "fmla v10.4s, v16.4s, v0.4s\n"
+    "ldr q15, [x28, x25]\n"
+    "fmla v11.4s, v16.4s, v2.4s\n"
+    "ldr q16, [x22, x26]\n"
+    "fmla v12.4s, v17.4s, v3.4s\n"
+    "ldr q17, [x27, x25]\n"
+    "fmla v10.4s, v19.4s, v6.4s\n"
+    "ldr q13, [x22, x25]\n"
+    "fmla v11.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v12.4s, v14.4s, v2.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v10.4s, v22.4s, v1.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v11.4s, v19.4s, v7.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v12.4s, v22.4s, v8.4s\n"
+    "add x28, x28, #16\n"
+    "fmla v10.4s, v23.4s, v4.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v11.4s, v23.4s, v9.4s\n"
+    "add x22, x22, #16\n"
+    "fmla v12.4s, v21.4s, v7.4s\n"
+    "movi v20.16b, #0\n"
+    "fmla v10.4s, v18.4s, v3.4s\n"
+    "fmov v22.4s, #6.0\n"
+    "fmax v5.4s, v5.4s, v20.4s\n"
+    "fmax v11.4s, v11.4s, v20.4s\n"
+    "fmla v12.4s, v15.4s, v9.4s\n"
+    "fmla v10.4s, v15.4s, v2.4s\n"
+    "fmin v5.4s, v5.4s, v22.4s\n"
+    "fmin v11.4s, v11.4s, v22.4s\n"
+    "fmax v12.4s, v12.4s, v20.4s\n"
+    "str q5, [%[outptr0]]\n"
+    "str q11, [x24]\n"
+    "fmla v10.4s, v16.4s, v8.4s\n"
+    "fmin v12.4s, v12.4s, v22.4s\n"
+    "str q12, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v10.4s, v17.4s, v7.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v10.4s, v13.4s, v9.4s\n"
+    "fmax v10.4s, v10.4s, v20.4s\n"
+    "fmin v10.4s, v10.4s, v22.4s\n"
+    "str q10, [x24, %[output_col_stride1]]\n"
+    "add x24, x24, #16\n"
+    "4:\n"
+    "cbz x19, 7f\n"
+    "ldr s14, [%[wbptr]]\n"
+    "mov v5.16b, v14.16b\n"
+    "ldr s0, [%[wbptr], #4]\n"
+    "mov v11.16b, v14.16b\n"
+    "ldr s1, [%[wbptr], #8]\n"
+    "mov v12.16b, v14.16b\n"
+    "ldr s2, [%[wbptr], #12]\n"
+    "mov v10.16b, v14.16b\n"
+    "ldr s6, [%[wbptr], #16]\n"
+    "ldr s3, [%[wbptr], #20]\n"
+    "subs x19, x19, #1\n"
+    "ldr s7, [%[wbptr], #24]\n"
+    "ldr s4, [%[wbptr], #28]\n"
+    "ldr s8, [%[wbptr], #32]\n"
+    "ldr s9, [%[wbptr], #36]\n"
+    "ldr s19, [%[inptr0]]\n"
+    "ldr s15, [x21]\n"
+    "fmla v5.4s, v19.4s, v0.4s\n"
+    "ldr s21, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr s16, [x28]\n"
+    "ldr s23, [x21, %[input_col_stride1]]\n"
+    "fmla v11.4s, v16.4s, v0.4s\n"
+    "ldr s18, [%[inptr0], x23]\n"
+    "fmla v5.4s, v15.4s, v6.4s\n"
+    "ldr s17, [x27]\n"
+    "ldr s13, [x28, %[input_col_stride1]]\n"
+    "fmla v5.4s, v21.4s, v1.4s\n"
+    "fmla v5.4s, v16.4s, v4.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v5.4s, v23.4s, v3.4s\n"
+    "ldr s21, [x21, x23]\n"
+    "fmla v12.4s, v18.4s, v0.4s\n"
+    "ldr s20, [%[inptr0], x26]\n"
+    "fmla v11.4s, v17.4s, v6.4s\n"
+    "ldr s19, [x22]\n"
+    "fmla v5.4s, v18.4s, v2.4s\n"
+    "ldr s15, [x27, %[input_col_stride1]]\n"
+    "fmla v12.4s, v21.4s, v6.4s\n"
+    "ldr s16, [x28, x23]\n"
+    "fmla v11.4s, v13.4s, v1.4s\n"
+    "ldr s17, [x21, x26]\n"
+    "fmla v5.4s, v13.4s, v8.4s\n"
+    "ldr s14, [%[inptr0], x25]\n"
+    "fmla v12.4s, v20.4s, v1.4s\n"
+    "ldr s20, [x22, %[input_col_stride1]]\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr s19, [x27, x23]\n"
+    "fmla v5.4s, v21.4s, v7.4s\n"
+    "ldr s22, [x28, x26]\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "ldr s21, [x21, x25]\n"
+    "fmla v11.4s, v15.4s, v3.4s\n"
+    "ldr s23, [x22, x23]\n"
+    "fmla v5.4s, v16.4s, v9.4s\n"
+    "ldr s18, [x27, x26]\n"
+    "fmla v10.4s, v16.4s, v0.4s\n"
+    "ldr s15, [x28, x25]\n"
+    "fmla v11.4s, v16.4s, v2.4s\n"
+    "ldr s16, [x22, x26]\n"
+    "fmla v12.4s, v17.4s, v3.4s\n"
+    "ldr s17, [x27, x25]\n"
+    "fmla v10.4s, v19.4s, v6.4s\n"
+    "ldr s13, [x22, x25]\n"
+    "fmla v11.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v12.4s, v14.4s, v2.4s\n"
+    "ldr s14, [%[wbptr]]\n"
+    "fmla v10.4s, v22.4s, v1.4s\n"
+    "ldr s0, [%[wbptr], #4]\n"
+    "fmla v11.4s, v19.4s, v7.4s\n"
+    "ldr s6, [%[wbptr], #16]\n"
+    "fmla v12.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v10.4s, v23.4s, v4.4s\n"
+    "ldr s1, [%[wbptr], #8]\n"
+    "fmla v11.4s, v23.4s, v9.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v12.4s, v21.4s, v7.4s\n"
+    "ldr s19, [%[inptr0]]\n"
+    "fmla v10.4s, v18.4s, v3.4s\n"
+    "ldr s4, [%[wbptr], #28]\n"
+    "movi v20.16b, #0\n"
+    "ldr s21, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v12.4s, v15.4s, v9.4s\n"
+    "ldr s18, [%[inptr0], x23]\n"
+    "fmla v10.4s, v15.4s, v2.4s\n"
+    "ldr s3, [%[wbptr], #20]\n"
+    "fmov v22.4s, #6.0\n"
+    "add x21, x21, #4\n"
+    "fmax v5.4s, v5.4s, v20.4s\n"
+    "ldr s15, [x21]\n"
+    "fmla v10.4s, v16.4s, v8.4s\n"
+    "ldr s2, [%[wbptr], #12]\n"
+    "fmin v5.4s, v5.4s, v22.4s\n"
+    "ldr s23, [x21, %[input_col_stride1]]\n"
+    "fmax v12.4s, v12.4s, v20.4s\n"
+    "add x28, x28, #4\n"
+    "str s5, [%[outptr0]]\n"
+    "fmla v10.4s, v17.4s, v7.4s\n"
+    "fmin v12.4s, v12.4s, v22.4s\n"
+    "ldr s8, [%[wbptr], #32]\n"
+    "fmax v11.4s, v11.4s, v20.4s\n"
+    "ldr s16, [x28]\n"
+    "str s12, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v10.4s, v13.4s, v9.4s\n"
+    "fmin v11.4s, v11.4s, v22.4s\n"
+    "ldr s7, [%[wbptr], #24]\n"
+    "mov v5.16b, v14.16b\n"
+    "ldr s13, [x28, %[input_col_stride1]]\n"
+    "str s11, [x24]\n"
+    "fmax v10.4s, v10.4s, v20.4s\n"
+    "mov v11.16b, v14.16b\n"
+    "ldr s9, [%[wbptr], #36]\n"
+    "fmin v10.4s, v10.4s, v22.4s\n"
+    "add x27, x27, #4\n"
+    "mov v12.16b, v14.16b\n"
+    "ldr s17, [x27]\n"
+    "str s10, [x24, %[output_col_stride1]]\n"
+    "fmla v5.4s, v19.4s, v0.4s\n"
+    "mov v10.16b, v14.16b\n"
+    "add x22, x22, #4\n"
+    "fmla v11.4s, v16.4s, v0.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v5.4s, v15.4s, v6.4s\n"
+    "add x24, x24, #4\n"
+    "subs x19, x19, #1\n"
+    "fmla v5.4s, v21.4s, v1.4s\n"
+    "fmla v5.4s, v16.4s, v4.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v5.4s, v23.4s, v3.4s\n"
+    "ldr s21, [x21, x23]\n"
+    "fmla v12.4s, v18.4s, v0.4s\n"
+    "ldr s20, [%[inptr0], x26]\n"
+    "fmla v11.4s, v17.4s, v6.4s\n"
+    "ldr s19, [x22]\n"
+    "fmla v5.4s, v18.4s, v2.4s\n"
+    "ldr s15, [x27, %[input_col_stride1]]\n"
+    "fmla v12.4s, v21.4s, v6.4s\n"
+    "ldr s16, [x28, x23]\n"
+    "fmla v11.4s, v13.4s, v1.4s\n"
+    "ldr s17, [x21, x26]\n"
+    "fmla v5.4s, v13.4s, v8.4s\n"
+    "ldr s14, [%[inptr0], x25]\n"
+    "fmla v12.4s, v20.4s, v1.4s\n"
+    "ldr s20, [x22, %[input_col_stride1]]\n"
+    "fmla v11.4s, v19.4s, v4.4s\n"
+    "ldr s19, [x27, x23]\n"
+    "fmla v5.4s, v21.4s, v7.4s\n"
+    "ldr s22, [x28, x26]\n"
+    "fmla v12.4s, v16.4s, v4.4s\n"
+    "ldr s21, [x21, x25]\n"
+    "fmla v11.4s, v15.4s, v3.4s\n"
+    "ldr s23, [x22, x23]\n"
+    "fmla v5.4s, v16.4s, v9.4s\n"
+    "ldr s18, [x27, x26]\n"
+    "fmla v10.4s, v16.4s, v0.4s\n"
+    "ldr s15, [x28, x25]\n"
+    "fmla v11.4s, v16.4s, v2.4s\n"
+    "ldr s16, [x22, x26]\n"
+    "fmla v12.4s, v17.4s, v3.4s\n"
+    "ldr s17, [x27, x25]\n"
+    "fmla v10.4s, v19.4s, v6.4s\n"
+    "ldr s13, [x22, x25]\n"
+    "fmla v11.4s, v20.4s, v8.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v12.4s, v14.4s, v2.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v10.4s, v22.4s, v1.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v11.4s, v19.4s, v7.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v12.4s, v22.4s, v8.4s\n"
+    "add x28, x28, #4\n"
+    "fmla v10.4s, v23.4s, v4.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v11.4s, v23.4s, v9.4s\n"
+    "add x22, x22, #4\n"
+    "fmla v12.4s, v21.4s, v7.4s\n"
+    "movi v20.16b, #0\n"
+    "fmla v10.4s, v18.4s, v3.4s\n"
+    "fmov v22.4s, #6.0\n"
+    "fmax v5.4s, v5.4s, v20.4s\n"
+    "fmax v11.4s, v11.4s, v20.4s\n"
+    "fmla v12.4s, v15.4s, v9.4s\n"
+    "fmla v10.4s, v15.4s, v2.4s\n"
+    "fmin v5.4s, v5.4s, v22.4s\n"
+    "fmin v11.4s, v11.4s, v22.4s\n"
+    "fmax v12.4s, v12.4s, v20.4s\n"
+    "str s5, [%[outptr0]]\n"
+    "str s11, [x24]\n"
+    "fmla v10.4s, v16.4s, v8.4s\n"
+    "fmin v12.4s, v12.4s, v22.4s\n"
+    "str s12, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v10.4s, v17.4s, v7.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v10.4s, v13.4s, v9.4s\n"
+    "fmax v10.4s, v10.4s, v20.4s\n"
+    "fmin v10.4s, v10.4s, v22.4s\n"
+    "str s10, [x24, %[output_col_stride1]]\n"
+    "add x24, x24, #4\n"
+    "7:\n"
+    : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr)
+    : [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-  },
-};
+#endif  // __aarch64__
 
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-  },
-};
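+// Explicit fp32 instantiation: 2x2 output tile, 3x3 kernel, 2x2 stride.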
+template class DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>;
 
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float>;
 }  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp16_fp16.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp16_fp16.cpp
new file mode 100644
index 0000000..23a99a8
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp16_fp16.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "impl_fp16_fp16.hpp"
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+namespace depthwise
+{
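+// fp16 counterpart of the 3x3-kernel, 3x3-output-tile, unit-stride kernel;
+// only compiled when the toolchain provides FP16 vector arithmetic.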
+template class DepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>;
+}  // namespace depthwise
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp
index 21e8f04..2508ec7 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,931 +25,2317 @@
 
 namespace depthwise
 {
-using Conv = DepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<3, 3, 3, 3, 1, 1, float, float>;
+
+using namespace neon_convolution_kernels;
+using Conv = DepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float, float>;
 
 #ifdef __aarch64__
-
 template <>
 template <>
-void ConvImpl::process_tile<true, 0, 0, 0, 0, 0, 0>(
-  const int n_channels,
-  const float* const weights,
-  const int weight_row_stride,
-  const int weight_col_stride,
-  const float* const inptr,
-  const int in_row_stride,
-  const int in_col_stride,
-  float* const outptr,
-  const int out_row_stride,
-  const int out_col_stride,
-  const int, const int, const int, const int, const int, const int, const int, const int
+void Conv::execute_tile<ActivationFunction::None>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
 )
 {
-  // Copy pointers
-  const float *uptr0 = inptr;
-  const float *wptr0 = weights;
-  float *vptr0 = outptr;
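+  // 3x3 output tile: nine accumulators (v8-v15 and v17) are seeded with the
+  // bias from wbptr and updated with fmla as the 5x5 input patch streams in;
+  // prfm hints prefetch the upcoming input rows and the next weight block.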
+  __asm __volatile(
+    "add x20, %[inptr0], %[input_row_stride]\n"
+    "add x13, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x24, %[outptr0], %[output_row_stride]\n"
+    "add x21, x20, %[input_row_stride]\n"
+    "add x14, x13, #64\n"
+    "add x15, x13, %[input_col_stride1]\n"
+    "add x22, x21, %[input_row_stride]\n"
+    "add x16, x15, #64\n"
+    "add x17, x15, %[input_col_stride1]\n"
+    "add x23, x22, %[input_row_stride]\n"
+    "add x18, x17, #64\n"
+    "add x25, x24, %[output_row_stride]\n"
+    "add x26, %[output_col_stride1], %[output_col_stride1]\n"
+    "and x27, %[n_channels], #3\n"
+    "lsr x28, %[n_channels], #2\n"
+    "cbz x28, 4f\n"
+    "1:\n"
+    "ldr q25, [%[wbptr]]\n"
+    "subs x28, x28, #1\n"
+    "mov v17.16b, v25.16b\n"
+    "ldr q16, [%[wbptr], #16]\n"
+    "mov v13.16b, v25.16b\n"
+    "ldr q7, [%[wbptr], #32]\n"
+    "mov v15.16b, v25.16b\n"
+    "ldr q6, [%[wbptr], #48]\n"
+    "mov v10.16b, v25.16b\n"
+    "ldr q5, [%[wbptr], #64]\n"
+    "mov v12.16b, v25.16b\n"
+    "ldr q4, [%[wbptr], #80]\n"
+    "mov v14.16b, v25.16b\n"
+    "ldr q3, [%[wbptr], #96]\n"
+    "mov v9.16b, v25.16b\n"
+    "ldr q2, [%[wbptr], #112]\n"
+    "mov v11.16b, v25.16b\n"
+    "ldr q1, [%[wbptr], #128]\n"
+    "mov v8.16b, v25.16b\n"
+    "ldr q0, [%[wbptr], #144]\n"
+    "ldr q26, [%[inptr0]]\n"
+    "ldr q28, [x20]\n"
+    "fmla v17.4s, v26.4s, v16.4s\n"
+    "ldr q29, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v13.4s, v28.4s, v16.4s\n"
+    "ldr q27, [x21]\n"
+    "fmla v15.4s, v29.4s, v16.4s\n"
+    "ldr q21, [x20, %[input_col_stride1]]\n"
+    "fmla v17.4s, v28.4s, v5.4s\n"
+    "ldr q20, [%[inptr0], x13]\n"
+    "ldr q23, [x22]\n"
+    "ldr q19, [x21, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "fmla v17.4s, v29.4s, v7.4s\n"
+    "prfm pldl1keep, [%[inptr0], x19]\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "prfm pldl1keep, [x20, x19]\n"
+    "prfm pldl1keep, [%[inptr0], x14]\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "prfm pldl1keep, [x21, x19]\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "ldr q30, [x20, x13]\n"
+    "fmla v13.4s, v27.4s, v5.4s\n"
+    "ldr q29, [%[inptr0], x15]\n"
+    "fmla v10.4s, v27.4s, v16.4s\n"
+    "ldr q28, [x23]\n"
+    "fmla v17.4s, v21.4s, v4.4s\n"
+    "ldr q24, [x22, %[input_col_stride1]]\n"
+    "fmla v13.4s, v21.4s, v7.4s\n"
+    "ldr q18, [x21, x13]\n"
+    "fmla v15.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [x20, x14]\n"
+    "fmla v12.4s, v21.4s, v16.4s\n"
+    "ldr q22, [x20, x15]\n"
+    "fmla v17.4s, v20.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v15.4s, v20.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v14.4s, v20.4s, v16.4s\n"
+    "ldr q25, [%[inptr0], x17]\n"
+    "fmla v13.4s, v23.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x19]\n"
+    "fmla v10.4s, v23.4s, v5.4s\n"
+    "ldr q26, [x23, %[input_col_stride1]]\n"
+    "fmla v17.4s, v19.4s, v1.4s\n"
+    "prfm pldl1keep, [x21, x14]\n"
+    "fmla v13.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v15.4s, v19.4s, v2.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v10.4s, v19.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, x19]\n"
+    "fmla v12.4s, v19.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, x14]\n"
+    "fmla v9.4s, v19.4s, v16.4s\n"
+    "ldr q27, [x22, x13]\n"
+    "fmla v17.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v13.4s, v30.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v15.4s, v30.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x14]\n"
+    "fmla v12.4s, v30.4s, v7.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v14.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x21, x18]\n"
+    "fmla v11.4s, v30.4s, v16.4s\n"
+    "ldr q21, [x21, x15]\n"
+    "fmla v15.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v14.4s, v29.4s, v7.4s\n"
+    "ldr q20, [x20, x17]\n"
+    "fmla v10.4s, v28.4s, v2.4s\n"
+    "ldr q19, [x23, x13]\n"
+    "fmla v13.4s, v24.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v12.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v10.4s, v24.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v9.4s, v24.4s, v5.4s\n"
+    "ldr q23, [x22, x15]\n"
+    "fmla v17.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v13.4s, v18.4s, v3.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v15.4s, v18.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "str q17, [%[outptr0]]\n"
+    "fmla v10.4s, v18.4s, v6.4s\n"
+    "fmla v12.4s, v18.4s, v4.4s\n"
+    "ldr q17, [x21, x17]\n"
+    "fmla v14.4s, v18.4s, v2.4s\n"
+    "prfm pldl1keep, [%[inptr0], x19]\n"
+    "fmla v9.4s, v18.4s, v7.4s\n"
+    "prfm pldl1keep, [%[inptr0], x14]\n"
+    "fmla v11.4s, v18.4s, v5.4s\n"
+    "add x20, x20, #16\n"
+    "fmla v8.4s, v18.4s, v16.4s\n"
+    "ldr q24, [x23, x15]\n"
+    "fmla v15.4s, v22.4s, v3.4s\n"
+    "ldr q18, [x22, x17]\n"
+    "fmla v12.4s, v22.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "fmla v14.4s, v22.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x19]\n"
+    "fmla v11.4s, v22.4s, v7.4s\n"
+    "ldr q22, [x23, x17]\n"
+    "fmla v10.4s, v26.4s, v1.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v14.4s, v25.4s, v6.4s\n"
+    "ldr q25, [%[wbptr]]\n"
+    "fmla v9.4s, v26.4s, v2.4s\n"
+    "ldr q16, [%[wbptr], #16]\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "fmla v10.4s, v27.4s, v3.4s\n"
+    "prfm pldl1keep, [x21, x19]\n"
+    "fmla v12.4s, v27.4s, v1.4s\n"
+    "add x22, x22, #16\n"
+    "str q13, [x24]\n"
+    "fmla v9.4s, v27.4s, v4.4s\n"
+    "fmla v11.4s, v27.4s, v2.4s\n"
+    "ldr q26, [%[inptr0]]\n"
+    "fmla v8.4s, v27.4s, v5.4s\n"
+    "ldr q28, [x20]\n"
+    "fmla v15.4s, v21.4s, v0.4s\n"
+    "ldr q29, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v14.4s, v21.4s, v1.4s\n"
+    "add x23, x23, #16\n"
+    "str q15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "ldr q5, [%[wbptr], #64]\n"
+    "fmla v8.4s, v21.4s, v7.4s\n"
+    "ldr q27, [x21]\n"
+    "fmla v14.4s, v20.4s, v3.4s\n"
+    "ldr q21, [x20, %[input_col_stride1]]\n"
+    "fmla v11.4s, v20.4s, v6.4s\n"
+    "ldr q20, [%[inptr0], x13]\n"
+    "fmla v10.4s, v19.4s, v0.4s\n"
+    "subs x28, x28, #1\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "fmla v8.4s, v19.4s, v2.4s\n"
+    "fmla v12.4s, v23.4s, v0.4s\n"
+    "ldr q7, [%[wbptr], #32]\n"
+    "str q10, [x25]\n"
+    "fmla v11.4s, v23.4s, v1.4s\n"
+    "fmla v9.4s, v23.4s, v3.4s\n"
+    "ldr q2, [%[wbptr], #112]\n"
+    "str q12, [x24, %[output_col_stride1]]\n"
+    "fmla v8.4s, v23.4s, v4.4s\n"
+    "fmla v14.4s, v17.4s, v0.4s\n"
+    "ldr q23, [x22]\n"
+    "fmla v11.4s, v17.4s, v3.4s\n"
+    "ldr q19, [x21, %[input_col_stride1]]\n"
+    "fmla v8.4s, v17.4s, v6.4s\n"
+    "ldr q4, [%[wbptr], #80]\n"
+    "str q14, [%[outptr0], x26]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "fmla v11.4s, v18.4s, v0.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v8.4s, v24.4s, v1.4s\n"
+    "ldr q6, [%[wbptr], #48]\n"
+    "str q9, [x25, %[output_col_stride1]]\n"
+    "mov v17.16b, v25.16b\n"
+    "str q11, [x24, x26]\n"
+    "mov v13.16b, v25.16b\n"
+    "fmla v8.4s, v18.4s, v3.4s\n"
+    "ldr q1, [%[wbptr], #128]\n"
+    "mov v15.16b, v25.16b\n"
+    "add x24, x24, #16\n"
+    "mov v10.16b, v25.16b\n"
+    "mov v12.16b, v25.16b\n"
+    "fmla v8.4s, v22.4s, v0.4s\n"
+    "ldr q3, [%[wbptr], #96]\n"
+    "mov v14.16b, v25.16b\n"
+    "mov v9.16b, v25.16b\n"
+    "mov v11.16b, v25.16b\n"
+    "fmla v17.4s, v26.4s, v16.4s\n"
+    "str q8, [x25, x26]\n"
+    "fmla v13.4s, v28.4s, v16.4s\n"
+    "mov v8.16b, v25.16b\n"
+    "ldr q0, [%[wbptr], #144]\n"
+    "fmla v17.4s, v28.4s, v5.4s\n"
+    "fmla v15.4s, v29.4s, v16.4s\n"
+    "add x25, x25, #16\n"
+    "fmla v17.4s, v29.4s, v7.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "ldr q30, [x20, x13]\n"
+    "fmla v13.4s, v27.4s, v5.4s\n"
+    "ldr q29, [%[inptr0], x15]\n"
+    "fmla v10.4s, v27.4s, v16.4s\n"
+    "ldr q28, [x23]\n"
+    "fmla v17.4s, v21.4s, v4.4s\n"
+    "ldr q24, [x22, %[input_col_stride1]]\n"
+    "fmla v13.4s, v21.4s, v7.4s\n"
+    "ldr q18, [x21, x13]\n"
+    "fmla v15.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [x20, x14]\n"
+    "fmla v12.4s, v21.4s, v16.4s\n"
+    "ldr q22, [x20, x15]\n"
+    "fmla v17.4s, v20.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v15.4s, v20.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v14.4s, v20.4s, v16.4s\n"
+    "ldr q25, [%[inptr0], x17]\n"
+    "fmla v13.4s, v23.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x19]\n"
+    "fmla v10.4s, v23.4s, v5.4s\n"
+    "ldr q26, [x23, %[input_col_stride1]]\n"
+    "fmla v17.4s, v19.4s, v1.4s\n"
+    "prfm pldl1keep, [x21, x14]\n"
+    "fmla v13.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v15.4s, v19.4s, v2.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v10.4s, v19.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, x19]\n"
+    "fmla v12.4s, v19.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, x14]\n"
+    "fmla v9.4s, v19.4s, v16.4s\n"
+    "ldr q27, [x22, x13]\n"
+    "fmla v17.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v13.4s, v30.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v15.4s, v30.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x14]\n"
+    "fmla v12.4s, v30.4s, v7.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v14.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x21, x18]\n"
+    "fmla v11.4s, v30.4s, v16.4s\n"
+    "ldr q21, [x21, x15]\n"
+    "fmla v15.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v14.4s, v29.4s, v7.4s\n"
+    "ldr q20, [x20, x17]\n"
+    "fmla v10.4s, v28.4s, v2.4s\n"
+    "ldr q19, [x23, x13]\n"
+    "fmla v13.4s, v24.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v12.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v10.4s, v24.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v9.4s, v24.4s, v5.4s\n"
+    "ldr q23, [x22, x15]\n"
+    "fmla v17.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v13.4s, v18.4s, v3.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v15.4s, v18.4s, v1.4s\n"
+    "add x20, x20, #16\n"
+    "str q17, [%[outptr0]]\n"
+    "fmla v10.4s, v18.4s, v6.4s\n"
+    "fmla v12.4s, v18.4s, v4.4s\n"
+    "ldr q17, [x21, x17]\n"
+    "fmla v14.4s, v18.4s, v2.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v9.4s, v18.4s, v7.4s\n"
+    "fmla v11.4s, v18.4s, v5.4s\n"
+    "fmla v8.4s, v18.4s, v16.4s\n"
+    "ldr q24, [x23, x15]\n"
+    "fmla v15.4s, v22.4s, v3.4s\n"
+    "ldr q18, [x22, x17]\n"
+    "fmla v12.4s, v22.4s, v6.4s\n"
+    "add x22, x22, #16\n"
+    "fmla v14.4s, v22.4s, v4.4s\n"
+    "fmla v11.4s, v22.4s, v7.4s\n"
+    "fmla v10.4s, v26.4s, v1.4s\n"
+    "ldr q22, [x23, x17]\n"
+    "fmla v9.4s, v26.4s, v2.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v14.4s, v25.4s, v6.4s\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "fmla v10.4s, v27.4s, v3.4s\n"
+    "fmla v12.4s, v27.4s, v1.4s\n"
+    "fmla v9.4s, v27.4s, v4.4s\n"
+    "fmla v11.4s, v27.4s, v2.4s\n"
+    "str q13, [x24]\n"
+    "fmla v8.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v21.4s, v0.4s\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "fmla v14.4s, v21.4s, v1.4s\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "fmla v8.4s, v21.4s, v7.4s\n"
+    "str q15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v10.4s, v19.4s, v0.4s\n"
+    "fmla v14.4s, v20.4s, v3.4s\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "fmla v11.4s, v20.4s, v6.4s\n"
+    "fmla v8.4s, v19.4s, v2.4s\n"
+    "str q10, [x25]\n"
+    "fmla v12.4s, v23.4s, v0.4s\n"
+    "fmla v9.4s, v23.4s, v3.4s\n"
+    "fmla v14.4s, v17.4s, v0.4s\n"
+    "fmla v11.4s, v23.4s, v1.4s\n"
+    "fmla v8.4s, v23.4s, v4.4s\n"
+    "str q12, [x24, %[output_col_stride1]]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "str q14, [%[outptr0], x26]\n"
+    "fmla v11.4s, v17.4s, v3.4s\n"
+    "fmla v8.4s, v17.4s, v6.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "str q9, [x25, %[output_col_stride1]]\n"
+    "fmla v11.4s, v18.4s, v0.4s\n"
+    "fmla v8.4s, v24.4s, v1.4s\n"
+    "str q11, [x24, x26]\n"
+    "fmla v8.4s, v18.4s, v3.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v8.4s, v22.4s, v0.4s\n"
+    "str q8, [x25, x26]\n"
+    "add x25, x25, #16\n"
+    "4:\n"
+    "cbz x27, 7f\n"
+    "ldr s25, [%[wbptr]]\n"
+    "mov v17.16b, v25.16b\n"
+    "ldr s16, [%[wbptr], #4]\n"
+    "mov v13.16b, v25.16b\n"
+    "ldr s7, [%[wbptr], #8]\n"
+    "mov v15.16b, v25.16b\n"
+    "ldr s6, [%[wbptr], #12]\n"
+    "mov v10.16b, v25.16b\n"
+    "ldr s5, [%[wbptr], #16]\n"
+    "mov v12.16b, v25.16b\n"
+    "ldr s4, [%[wbptr], #20]\n"
+    "mov v14.16b, v25.16b\n"
+    "ldr s3, [%[wbptr], #24]\n"
+    "mov v9.16b, v25.16b\n"
+    "ldr s2, [%[wbptr], #28]\n"
+    "mov v11.16b, v25.16b\n"
+    "ldr s1, [%[wbptr], #32]\n"
+    "mov v8.16b, v25.16b\n"
+    "ldr s0, [%[wbptr], #36]\n"
+    "ldr s26, [%[inptr0]]\n"
+    "subs x27, x27, #1\n"
+    "fmla v17.4s, v26.4s, v16.4s\n"
+    "ldr s28, [x20]\n"
+    "fmla v13.4s, v28.4s, v16.4s\n"
+    "ldr s29, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v15.4s, v29.4s, v16.4s\n"
+    "ldr s27, [x21]\n"
+    "fmla v17.4s, v28.4s, v5.4s\n"
+    "ldr s21, [x20, %[input_col_stride1]]\n"
+    "ldr s20, [%[inptr0], x13]\n"
+    "ldr s23, [x22]\n"
+    "ldr s19, [x21, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v17.4s, v29.4s, v7.4s\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x19]\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "prfm pldl1keep, [x20, x19]\n"
+    "prfm pldl1keep, [%[inptr0], x14]\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "prfm pldl1keep, [x21, x19]\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "ldr s30, [x20, x13]\n"
+    "fmla v13.4s, v27.4s, v5.4s\n"
+    "ldr s29, [%[inptr0], x15]\n"
+    "fmla v10.4s, v27.4s, v16.4s\n"
+    "ldr s28, [x23]\n"
+    "fmla v17.4s, v21.4s, v4.4s\n"
+    "ldr s24, [x22, %[input_col_stride1]]\n"
+    "fmla v13.4s, v21.4s, v7.4s\n"
+    "ldr s18, [x21, x13]\n"
+    "fmla v15.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [x20, x14]\n"
+    "fmla v12.4s, v21.4s, v16.4s\n"
+    "ldr s22, [x20, x15]\n"
+    "fmla v17.4s, v20.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v15.4s, v20.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v14.4s, v20.4s, v16.4s\n"
+    "ldr s25, [%[inptr0], x17]\n"
+    "fmla v13.4s, v23.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x19]\n"
+    "fmla v10.4s, v23.4s, v5.4s\n"
+    "ldr s26, [x23, %[input_col_stride1]]\n"
+    "fmla v17.4s, v19.4s, v1.4s\n"
+    "prfm pldl1keep, [x21, x14]\n"
+    "fmla v13.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v15.4s, v19.4s, v2.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v10.4s, v19.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, x19]\n"
+    "fmla v12.4s, v19.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, x14]\n"
+    "fmla v9.4s, v19.4s, v16.4s\n"
+    "ldr s27, [x22, x13]\n"
+    "fmla v17.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v13.4s, v30.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v15.4s, v30.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x14]\n"
+    "fmla v12.4s, v30.4s, v7.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v14.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x21, x18]\n"
+    "fmla v11.4s, v30.4s, v16.4s\n"
+    "ldr s21, [x21, x15]\n"
+    "fmla v15.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v14.4s, v29.4s, v7.4s\n"
+    "ldr s20, [x20, x17]\n"
+    "fmla v10.4s, v28.4s, v2.4s\n"
+    "ldr s19, [x23, x13]\n"
+    "fmla v13.4s, v24.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v12.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v10.4s, v24.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v9.4s, v24.4s, v5.4s\n"
+    "ldr s23, [x22, x15]\n"
+    "fmla v17.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v13.4s, v18.4s, v3.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v15.4s, v18.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "str s17, [%[outptr0]]\n"
+    "fmla v10.4s, v18.4s, v6.4s\n"
+    "fmla v12.4s, v18.4s, v4.4s\n"
+    "ldr s17, [x21, x17]\n"
+    "fmla v14.4s, v18.4s, v2.4s\n"
+    "prfm pldl1keep, [%[inptr0], x19]\n"
+    "fmla v9.4s, v18.4s, v7.4s\n"
+    "prfm pldl1keep, [%[inptr0], x14]\n"
+    "fmla v11.4s, v18.4s, v5.4s\n"
+    "add x20, x20, #4\n"
+    "fmla v8.4s, v18.4s, v16.4s\n"
+    "ldr s24, [x23, x15]\n"
+    "fmla v15.4s, v22.4s, v3.4s\n"
+    "ldr s18, [x22, x17]\n"
+    "fmla v12.4s, v22.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "fmla v14.4s, v22.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x19]\n"
+    "fmla v11.4s, v22.4s, v7.4s\n"
+    "ldr s22, [x23, x17]\n"
+    "fmla v10.4s, v26.4s, v1.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v14.4s, v25.4s, v6.4s\n"
+    "ldr s25, [%[wbptr]]\n"
+    "fmla v9.4s, v26.4s, v2.4s\n"
+    "ldr s16, [%[wbptr], #4]\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "fmla v10.4s, v27.4s, v3.4s\n"
+    "prfm pldl1keep, [x21, x19]\n"
+    "fmla v12.4s, v27.4s, v1.4s\n"
+    "add x22, x22, #4\n"
+    "str s13, [x24]\n"
+    "fmla v9.4s, v27.4s, v4.4s\n"
+    "fmla v11.4s, v27.4s, v2.4s\n"
+    "ldr s26, [%[inptr0]]\n"
+    "fmla v8.4s, v27.4s, v5.4s\n"
+    "ldr s28, [x20]\n"
+    "fmla v15.4s, v21.4s, v0.4s\n"
+    "ldr s29, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v14.4s, v21.4s, v1.4s\n"
+    "add x23, x23, #4\n"
+    "str s15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "ldr s5, [%[wbptr], #16]\n"
+    "fmla v8.4s, v21.4s, v7.4s\n"
+    "ldr s27, [x21]\n"
+    "fmla v14.4s, v20.4s, v3.4s\n"
+    "ldr s21, [x20, %[input_col_stride1]]\n"
+    "fmla v11.4s, v20.4s, v6.4s\n"
+    "ldr s20, [%[inptr0], x13]\n"
+    "fmla v10.4s, v19.4s, v0.4s\n"
+    "subs x27, x27, #1\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "fmla v8.4s, v19.4s, v2.4s\n"
+    "fmla v12.4s, v23.4s, v0.4s\n"
+    "ldr s7, [%[wbptr], #8]\n"
+    "str s10, [x25]\n"
+    "fmla v11.4s, v23.4s, v1.4s\n"
+    "fmla v9.4s, v23.4s, v3.4s\n"
+    "ldr s2, [%[wbptr], #28]\n"
+    "str s12, [x24, %[output_col_stride1]]\n"
+    "fmla v8.4s, v23.4s, v4.4s\n"
+    "fmla v14.4s, v17.4s, v0.4s\n"
+    "ldr s23, [x22]\n"
+    "fmla v11.4s, v17.4s, v3.4s\n"
+    "ldr s19, [x21, %[input_col_stride1]]\n"
+    "fmla v8.4s, v17.4s, v6.4s\n"
+    "ldr s4, [%[wbptr], #20]\n"
+    "str s14, [%[outptr0], x26]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "fmla v11.4s, v18.4s, v0.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v8.4s, v24.4s, v1.4s\n"
+    "ldr s6, [%[wbptr], #12]\n"
+    "str s9, [x25, %[output_col_stride1]]\n"
+    "mov v17.16b, v25.16b\n"
+    "str s11, [x24, x26]\n"
+    "mov v13.16b, v25.16b\n"
+    "fmla v8.4s, v18.4s, v3.4s\n"
+    "ldr s1, [%[wbptr], #32]\n"
+    "mov v15.16b, v25.16b\n"
+    "add x24, x24, #4\n"
+    "mov v10.16b, v25.16b\n"
+    "mov v12.16b, v25.16b\n"
+    "fmla v8.4s, v22.4s, v0.4s\n"
+    "ldr s3, [%[wbptr], #24]\n"
+    "mov v14.16b, v25.16b\n"
+    "mov v9.16b, v25.16b\n"
+    "mov v11.16b, v25.16b\n"
+    "fmla v17.4s, v26.4s, v16.4s\n"
+    "str s8, [x25, x26]\n"
+    "fmla v13.4s, v28.4s, v16.4s\n"
+    "mov v8.16b, v25.16b\n"
+    "ldr s0, [%[wbptr], #36]\n"
+    "fmla v17.4s, v28.4s, v5.4s\n"
+    "fmla v15.4s, v29.4s, v16.4s\n"
+    "add x25, x25, #4\n"
+    "fmla v17.4s, v29.4s, v7.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "ldr s30, [x20, x13]\n"
+    "fmla v13.4s, v27.4s, v5.4s\n"
+    "ldr s29, [%[inptr0], x15]\n"
+    "fmla v10.4s, v27.4s, v16.4s\n"
+    "ldr s28, [x23]\n"
+    "fmla v17.4s, v21.4s, v4.4s\n"
+    "ldr s24, [x22, %[input_col_stride1]]\n"
+    "fmla v13.4s, v21.4s, v7.4s\n"
+    "ldr s18, [x21, x13]\n"
+    "fmla v15.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [x20, x14]\n"
+    "fmla v12.4s, v21.4s, v16.4s\n"
+    "ldr s22, [x20, x15]\n"
+    "fmla v17.4s, v20.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v15.4s, v20.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v14.4s, v20.4s, v16.4s\n"
+    "ldr s25, [%[inptr0], x17]\n"
+    "fmla v13.4s, v23.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x19]\n"
+    "fmla v10.4s, v23.4s, v5.4s\n"
+    "ldr s26, [x23, %[input_col_stride1]]\n"
+    "fmla v17.4s, v19.4s, v1.4s\n"
+    "prfm pldl1keep, [x21, x14]\n"
+    "fmla v13.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v15.4s, v19.4s, v2.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v10.4s, v19.4s, v7.4s\n"
+    "prfm pldl1keep, [x23, x19]\n"
+    "fmla v12.4s, v19.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, x14]\n"
+    "fmla v9.4s, v19.4s, v16.4s\n"
+    "ldr s27, [x22, x13]\n"
+    "fmla v17.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v13.4s, v30.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v15.4s, v30.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x14]\n"
+    "fmla v12.4s, v30.4s, v7.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v14.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x21, x18]\n"
+    "fmla v11.4s, v30.4s, v16.4s\n"
+    "ldr s21, [x21, x15]\n"
+    "fmla v15.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v14.4s, v29.4s, v7.4s\n"
+    "ldr s20, [x20, x17]\n"
+    "fmla v10.4s, v28.4s, v2.4s\n"
+    "ldr s19, [x23, x13]\n"
+    "fmla v13.4s, v24.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v12.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v10.4s, v24.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v9.4s, v24.4s, v5.4s\n"
+    "ldr s23, [x22, x15]\n"
+    "fmla v17.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v13.4s, v18.4s, v3.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v15.4s, v18.4s, v1.4s\n"
+    "add x20, x20, #4\n"
+    "str s17, [%[outptr0]]\n"
+    "fmla v10.4s, v18.4s, v6.4s\n"
+    "fmla v12.4s, v18.4s, v4.4s\n"
+    "ldr s17, [x21, x17]\n"
+    "fmla v14.4s, v18.4s, v2.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v9.4s, v18.4s, v7.4s\n"
+    "fmla v11.4s, v18.4s, v5.4s\n"
+    "fmla v8.4s, v18.4s, v16.4s\n"
+    "ldr s24, [x23, x15]\n"
+    "fmla v15.4s, v22.4s, v3.4s\n"
+    "ldr s18, [x22, x17]\n"
+    "fmla v12.4s, v22.4s, v6.4s\n"
+    "add x22, x22, #4\n"
+    "fmla v14.4s, v22.4s, v4.4s\n"
+    "fmla v11.4s, v22.4s, v7.4s\n"
+    "fmla v10.4s, v26.4s, v1.4s\n"
+    "ldr s22, [x23, x17]\n"
+    "fmla v9.4s, v26.4s, v2.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v14.4s, v25.4s, v6.4s\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "fmla v10.4s, v27.4s, v3.4s\n"
+    "fmla v12.4s, v27.4s, v1.4s\n"
+    "fmla v9.4s, v27.4s, v4.4s\n"
+    "fmla v11.4s, v27.4s, v2.4s\n"
+    "str s13, [x24]\n"
+    "fmla v8.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v21.4s, v0.4s\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "fmla v14.4s, v21.4s, v1.4s\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "fmla v8.4s, v21.4s, v7.4s\n"
+    "str s15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v10.4s, v19.4s, v0.4s\n"
+    "fmla v14.4s, v20.4s, v3.4s\n"
+    "fmla v9.4s, v19.4s, v1.4s\n"
+    "fmla v11.4s, v20.4s, v6.4s\n"
+    "fmla v8.4s, v19.4s, v2.4s\n"
+    "str s10, [x25]\n"
+    "fmla v12.4s, v23.4s, v0.4s\n"
+    "fmla v9.4s, v23.4s, v3.4s\n"
+    "fmla v14.4s, v17.4s, v0.4s\n"
+    "fmla v11.4s, v23.4s, v1.4s\n"
+    "fmla v8.4s, v23.4s, v4.4s\n"
+    "str s12, [x24, %[output_col_stride1]]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "str s14, [%[outptr0], x26]\n"
+    "fmla v11.4s, v17.4s, v3.4s\n"
+    "fmla v8.4s, v17.4s, v6.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "str s9, [x25, %[output_col_stride1]]\n"
+    "fmla v11.4s, v18.4s, v0.4s\n"
+    "fmla v8.4s, v24.4s, v1.4s\n"
+    "str s11, [x24, x26]\n"
+    "fmla v8.4s, v18.4s, v3.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v8.4s, v22.4s, v0.4s\n"
+    "str s8, [x25, x26]\n"
+    "add x25, x25, #4\n"
+    "7:\n"
+    : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
-  int channels_remaining = n_channels;
-  if (channels_remaining >= 4)
-  {
-    // Process blocks of 4 channels at a time
-    int n_iters = ((channels_remaining / 4) + 1)/2 - 1;
-    const bool odd_tail = (channels_remaining / 4) & 1;
-    channels_remaining %= 4;
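+// Tile kernel specialised for a fused ReLU. weight_bias_ptr holds the bias
+// followed by the nine kernel taps: each accumulator is seeded with the bias,
+// updated with fmla, then clamped against zero (fmax) before being stored.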
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
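+  // Reference semantics of this tile, as a minimal sketch inferred from the
+  // assembly below (illustrative only, not part of the patch): per channel, a
+  // 3x3 depthwise convolution over a 5x5 input window produces a 3x3 output
+  // tile at stride 1, with the bias folded in and ReLU applied on store:
+  //
+  //   for (int oi = 0; oi < 3; oi++)
+  //     for (int oj = 0; oj < 3; oj++)
+  //     {
+  //       float acc = bias;  // first element behind weight_bias_ptr
+  //       for (int ki = 0; ki < 3; ki++)
+  //         for (int kj = 0; kj < 3; kj++)
+  //           acc += in[(oi + ki) * in_row_stride + (oj + kj) * in_col_stride] * w[ki][kj];
+  //       out[oi * out_row_stride + oj * out_col_stride] = std::max(acc, 0.0f);
+  //     }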
+  __asm __volatile(
+    "add x25, %[inptr0], %[input_row_stride]\n"
+    "add x16, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x21, %[outptr0], %[output_row_stride]\n"
+    "add x22, x25, %[input_row_stride]\n"
+    "add x23, x16, #64\n"
+    "add x26, x16, %[input_col_stride1]\n"
+    "add x13, x22, %[input_row_stride]\n"
+    "add x20, x26, #64\n"
+    "add x18, x26, %[input_col_stride1]\n"
+    "add x24, x13, %[input_row_stride]\n"
+    "add x15, x18, #64\n"
+    "add x14, x21, %[output_row_stride]\n"
+    "add x19, %[output_col_stride1], %[output_col_stride1]\n"
+    "and x27, %[n_channels], #3\n"
+    "lsr x28, %[n_channels], #2\n"
+    "cbz x28, 4f\n"
+    "1:\n"
+    "ldr q20, [%[wbptr]]\n"
+    "subs x28, x28, #1\n"
+    "mov v4.16b, v20.16b\n"
+    "ldr q15, [%[wbptr], #16]\n"
+    "mov v1.16b, v20.16b\n"
+    "ldr q0, [%[wbptr], #32]\n"
+    "mov v3.16b, v20.16b\n"
+    "ldr q13, [%[wbptr], #48]\n"
+    "mov v7.16b, v20.16b\n"
+    "ldr q16, [%[wbptr], #64]\n"
+    "mov v9.16b, v20.16b\n"
+    "ldr q12, [%[wbptr], #80]\n"
+    "mov v2.16b, v20.16b\n"
+    "ldr q17, [%[wbptr], #96]\n"
+    "mov v6.16b, v20.16b\n"
+    "ldr q11, [%[wbptr], #112]\n"
+    "mov v8.16b, v20.16b\n"
+    "ldr q10, [%[wbptr], #128]\n"
+    "mov v5.16b, v20.16b\n"
+    "ldr q14, [%[wbptr], #144]\n"
+    "ldr q27, [%[inptr0]]\n"
+    "ldr q24, [x25]\n"
+    "fmla v4.4s, v27.4s, v15.4s\n"
+    "ldr q22, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q21, [x22]\n"
+    "ldr q19, [x25, %[input_col_stride1]]\n"
+    "ldr q31, [%[inptr0], x16]\n"
+    "ldr q28, [x13]\n"
+    "fmla v4.4s, v24.4s, v16.4s\n"
+    "ldr q18, [x22, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x25, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x17]\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "prfm pldl1keep, [x25, x17]\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "prfm pldl1keep, [x13, #64]\n"
+    "prfm pldl1keep, [x22, x17]\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v1.4s, v24.4s, v15.4s\n"
+    "ldr q24, [x25, x16]\n"
+    "fmla v4.4s, v22.4s, v0.4s\n"
+    "ldr q29, [%[inptr0], x26]\n"
+    "fmla v3.4s, v22.4s, v15.4s\n"
+    "ldr q30, [x24]\n"
+    "fmla v1.4s, v21.4s, v16.4s\n"
+    "ldr q25, [x13, %[input_col_stride1]]\n"
+    "fmla v4.4s, v21.4s, v11.4s\n"
+    "prfm pldl1keep, [x25, x23]\n"
+    "fmla v7.4s, v21.4s, v15.4s\n"
+    "ldr q26, [x22, x16]\n"
+    "fmla v1.4s, v19.4s, v0.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v4.4s, v19.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v3.4s, v19.4s, v16.4s\n"
+    "prfm pldl1keep, [x13, x17]\n"
+    "fmla v9.4s, v19.4s, v15.4s\n"
+    "ldr q23, [x25, x26]\n"
+    "fmla v4.4s, v31.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x23]\n"
+    "fmla v3.4s, v31.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x20]\n"
+    "fmla v2.4s, v31.4s, v15.4s\n"
+    "ldr q20, [%[inptr0], x18]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "ldr q28, [x24, %[input_col_stride1]]\n"
+    "fmla v4.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x17]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "prfm pldl1keep, [x13, x23]\n"
+    "fmla v3.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x22, x20]\n"
+    "fmla v7.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x15]\n"
+    "fmla v9.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x23]\n"
+    "fmla v6.4s, v18.4s, v15.4s\n"
+    "ldr q27, [x13, x16]\n"
+    "fmla v4.4s, v24.4s, v17.4s\n"
+    "prfm pldl1keep, [x13, x20]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x15]\n"
+    "fmla v3.4s, v24.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, x20]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "prfm pldl1keep, [x13, x15]\n"
+    "fmla v2.4s, v24.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v8.4s, v24.4s, v15.4s\n"
+    "ldr q24, [x22, x26]\n"
+    "fmla v3.4s, v29.4s, v13.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v2.4s, v29.4s, v0.4s\n"
+    "ldr q22, [x25, x18]\n"
+    "fmla v7.4s, v30.4s, v11.4s\n"
+    "ldr q21, [x24, x16]\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v9.4s, v25.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v7.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v6.4s, v25.4s, v16.4s\n"
+    "ldr q19, [x13, x26]\n"
+    "fmla v4.4s, v26.4s, v14.4s\n"
+    "prfm pldl1keep, [%[inptr0], x17]\n"
+    "fmla v1.4s, v26.4s, v17.4s\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "fmla v3.4s, v26.4s, v10.4s\n"
+    "add x25, x25, #16\n"
+    "fmla v7.4s, v26.4s, v13.4s\n"
+    "prfm pldl1keep, [x25, #64]\n"
+    "fmla v9.4s, v26.4s, v12.4s\n"
+    "prfm pldl1keep, [x25, x17]\n"
+    "fmla v2.4s, v26.4s, v11.4s\n"
+    "subs x28, x28, #1\n"
+    "fmla v6.4s, v26.4s, v0.4s\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "fmla v5.4s, v26.4s, v15.4s\n"
+    "ldr q26, [x22, x18]\n"
+    "fmla v3.4s, v23.4s, v17.4s\n"
+    "ldr q18, [x24, x26]\n"
+    "fmla v9.4s, v23.4s, v13.4s\n"
+    "add x22, x22, #16\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v8.4s, v23.4s, v0.4s\n"
+    "ldr q23, [x13, x18]\n"
+    "fmla v7.4s, v28.4s, v10.4s\n"
+    "prfm pldl1keep, [x22, x17]\n"
+    "fmla v2.4s, v20.4s, v13.4s\n"
+    "ldr q25, [x24, x18]\n"
+    "fmla v6.4s, v28.4s, v11.4s\n"
+    "ldr q20, [%[wbptr]]\n"
+    "fmla v1.4s, v27.4s, v14.4s\n"
+    "add x13, x13, #16\n"
+    "fmla v7.4s, v27.4s, v17.4s\n"
+    "prfm pldl1keep, [x13, #64]\n"
+    "fmla v9.4s, v27.4s, v10.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v6.4s, v27.4s, v12.4s\n"
+    "fmla v8.4s, v27.4s, v11.4s\n"
+    "fmla v5.4s, v27.4s, v16.4s\n"
+    "ldr q15, [%[wbptr], #16]\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "ldr q27, [%[inptr0]]\n"
+    "fmla v9.4s, v24.4s, v17.4s\n"
+    "fmla v2.4s, v24.4s, v10.4s\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "fmla v8.4s, v24.4s, v12.4s\n"
+    "fmla v5.4s, v24.4s, v0.4s\n"
+    "ldr q16, [%[wbptr], #64]\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "ldr q24, [x25]\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "ldr q22, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v7.4s, v21.4s, v14.4s\n"
+    "fmla v6.4s, v21.4s, v10.4s\n"
+    "fmla v5.4s, v21.4s, v11.4s\n"
+    "ldr q0, [%[wbptr], #32]\n"
+    "fmla v9.4s, v19.4s, v14.4s\n"
+    "ldr q21, [x22]\n"
+    "fmla v6.4s, v19.4s, v17.4s\n"
+    "fmla v8.4s, v19.4s, v10.4s\n"
+    "fmla v5.4s, v19.4s, v12.4s\n"
+    "ldr q11, [%[wbptr], #112]\n"
+    "fmla v2.4s, v26.4s, v14.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v8.4s, v26.4s, v17.4s\n"
+    "fmla v6.4s, v18.4s, v14.4s\n"
+    "fmla v5.4s, v26.4s, v13.4s\n"
+    "ldr q12, [%[wbptr], #80]\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "ldr q19, [x25, %[input_col_stride1]]\n"
+    "fmla v8.4s, v23.4s, v14.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "str q4, [%[outptr0]]\n"
+    "fmla v5.4s, v18.4s, v10.4s\n"
+    "str q3, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "ldr q13, [%[wbptr], #48]\n"
+    "str q2, [%[outptr0], x19]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "str q1, [x21]\n"
+    "fmax v9.4s, v9.4s, v29.4s\n"
+    "fmax v8.4s, v8.4s, v29.4s\n"
+    "ldr q10, [%[wbptr], #128]\n"
+    "str q9, [x21, %[output_col_stride1]]\n"
+    "fmla v5.4s, v25.4s, v14.4s\n"
+    "str q8, [x21, x19]\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "ldr q17, [%[wbptr], #96]\n"
+    "str q7, [x14]\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "str q6, [x14, %[output_col_stride1]]\n"
+    "mov v4.16b, v20.16b\n"
+    "str q5, [x14, x19]\n"
+    "mov v1.16b, v20.16b\n"
+    "mov v3.16b, v20.16b\n"
+    "ldr q14, [%[wbptr], #144]\n"
+    "mov v7.16b, v20.16b\n"
+    "ldr q31, [%[inptr0], x16]\n"
+    "mov v9.16b, v20.16b\n"
+    "ldr q28, [x13]\n"
+    "mov v2.16b, v20.16b\n"
+    "ldr q18, [x22, %[input_col_stride1]]\n"
+    "mov v6.16b, v20.16b\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "mov v8.16b, v20.16b\n"
+    "add x21, x21, #16\n"
+    "mov v5.16b, v20.16b\n"
+    "add x14, x14, #16\n"
+    "fmla v4.4s, v27.4s, v15.4s\n"
+    "fmla v4.4s, v24.4s, v16.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v1.4s, v24.4s, v15.4s\n"
+    "ldr q24, [x25, x16]\n"
+    "fmla v4.4s, v22.4s, v0.4s\n"
+    "ldr q29, [%[inptr0], x26]\n"
+    "fmla v3.4s, v22.4s, v15.4s\n"
+    "ldr q30, [x24]\n"
+    "fmla v1.4s, v21.4s, v16.4s\n"
+    "ldr q25, [x13, %[input_col_stride1]]\n"
+    "fmla v4.4s, v21.4s, v11.4s\n"
+    "prfm pldl1keep, [x25, x23]\n"
+    "fmla v7.4s, v21.4s, v15.4s\n"
+    "ldr q26, [x22, x16]\n"
+    "fmla v1.4s, v19.4s, v0.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v4.4s, v19.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v3.4s, v19.4s, v16.4s\n"
+    "prfm pldl1keep, [x13, x17]\n"
+    "fmla v9.4s, v19.4s, v15.4s\n"
+    "ldr q23, [x25, x26]\n"
+    "fmla v4.4s, v31.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x23]\n"
+    "fmla v3.4s, v31.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x20]\n"
+    "fmla v2.4s, v31.4s, v15.4s\n"
+    "ldr q20, [%[inptr0], x18]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "ldr q28, [x24, %[input_col_stride1]]\n"
+    "fmla v4.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x17]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "prfm pldl1keep, [x13, x23]\n"
+    "fmla v3.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x22, x20]\n"
+    "fmla v7.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x15]\n"
+    "fmla v9.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x23]\n"
+    "fmla v6.4s, v18.4s, v15.4s\n"
+    "ldr q27, [x13, x16]\n"
+    "fmla v4.4s, v24.4s, v17.4s\n"
+    "prfm pldl1keep, [x13, x20]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x15]\n"
+    "fmla v3.4s, v24.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, x20]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "prfm pldl1keep, [x13, x15]\n"
+    "fmla v2.4s, v24.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v8.4s, v24.4s, v15.4s\n"
+    "ldr q24, [x22, x26]\n"
+    "fmla v3.4s, v29.4s, v13.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v2.4s, v29.4s, v0.4s\n"
+    "ldr q22, [x25, x18]\n"
+    "fmla v7.4s, v30.4s, v11.4s\n"
+    "ldr q21, [x24, x16]\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v9.4s, v25.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v7.4s, v25.4s, v12.4s\n"
+    "add x25, x25, #16\n"
+    "fmla v6.4s, v25.4s, v16.4s\n"
+    "ldr q19, [x13, x26]\n"
+    "fmla v4.4s, v26.4s, v14.4s\n"
+    "fmla v1.4s, v26.4s, v17.4s\n"
+    "fmla v3.4s, v26.4s, v10.4s\n"
+    "fmla v7.4s, v26.4s, v13.4s\n"
+    "fmla v9.4s, v26.4s, v12.4s\n"
+    "fmla v2.4s, v26.4s, v11.4s\n"
+    "fmla v6.4s, v26.4s, v0.4s\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "fmla v5.4s, v26.4s, v15.4s\n"
+    "ldr q26, [x22, x18]\n"
+    "fmla v3.4s, v23.4s, v17.4s\n"
+    "ldr q18, [x24, x26]\n"
+    "fmla v9.4s, v23.4s, v13.4s\n"
+    "add x22, x22, #16\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "fmla v8.4s, v23.4s, v0.4s\n"
+    "fmla v7.4s, v28.4s, v10.4s\n"
+    "ldr q23, [x13, x18]\n"
+    "fmla v6.4s, v28.4s, v11.4s\n"
+    "ldr q25, [x24, x18]\n"
+    "fmla v2.4s, v20.4s, v13.4s\n"
+    "add x13, x13, #16\n"
+    "fmla v1.4s, v27.4s, v14.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v7.4s, v27.4s, v17.4s\n"
+    "fmla v9.4s, v27.4s, v10.4s\n"
+    "fmla v6.4s, v27.4s, v12.4s\n"
+    "fmla v8.4s, v27.4s, v11.4s\n"
+    "fmla v5.4s, v27.4s, v16.4s\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "fmla v9.4s, v24.4s, v17.4s\n"
+    "fmla v2.4s, v24.4s, v10.4s\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "fmla v8.4s, v24.4s, v12.4s\n"
+    "fmla v5.4s, v24.4s, v0.4s\n"
+    "fmla v7.4s, v21.4s, v14.4s\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "fmla v9.4s, v19.4s, v14.4s\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "fmla v6.4s, v21.4s, v10.4s\n"
+    "fmla v5.4s, v21.4s, v11.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v2.4s, v26.4s, v14.4s\n"
+    "fmla v6.4s, v19.4s, v17.4s\n"
+    "fmla v8.4s, v19.4s, v10.4s\n"
+    "fmla v5.4s, v19.4s, v12.4s\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "fmla v6.4s, v18.4s, v14.4s\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "str q4, [%[outptr0]]\n"
+    "fmla v8.4s, v26.4s, v17.4s\n"
+    "str q3, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v5.4s, v26.4s, v13.4s\n"
+    "str q2, [%[outptr0], x19]\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "fmla v8.4s, v23.4s, v14.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "str q1, [x21]\n"
+    "fmla v5.4s, v18.4s, v10.4s\n"
+    "fmax v9.4s, v9.4s, v29.4s\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmax v8.4s, v8.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "str q9, [x21, %[output_col_stride1]]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "str q8, [x21, x19]\n"
+    "str q7, [x14]\n"
+    "str q6, [x14, %[output_col_stride1]]\n"
+    "add x21, x21, #16\n"
+    "fmla v5.4s, v25.4s, v14.4s\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "str q5, [x14, x19]\n"
+    "add x14, x14, #16\n"
+    "4:\n"
+    "cbz x27, 7f\n"
+    "ldr s20, [%[wbptr]]\n"
+    "mov v4.16b, v20.16b\n"
+    "ldr s15, [%[wbptr], #4]\n"
+    "mov v1.16b, v20.16b\n"
+    "ldr s0, [%[wbptr], #8]\n"
+    "mov v3.16b, v20.16b\n"
+    "ldr s13, [%[wbptr], #12]\n"
+    "mov v7.16b, v20.16b\n"
+    "ldr s16, [%[wbptr], #16]\n"
+    "mov v9.16b, v20.16b\n"
+    "ldr s12, [%[wbptr], #20]\n"
+    "mov v2.16b, v20.16b\n"
+    "ldr s17, [%[wbptr], #24]\n"
+    "mov v6.16b, v20.16b\n"
+    "ldr s11, [%[wbptr], #28]\n"
+    "mov v8.16b, v20.16b\n"
+    "ldr s10, [%[wbptr], #32]\n"
+    "mov v5.16b, v20.16b\n"
+    "ldr s14, [%[wbptr], #36]\n"
+    "ldr s27, [%[inptr0]]\n"
+    "subs x27, x27, #1\n"
+    "fmla v4.4s, v27.4s, v15.4s\n"
+    "ldr s24, [x25]\n"
+    "ldr s22, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr s21, [x22]\n"
+    "ldr s19, [x25, %[input_col_stride1]]\n"
+    "ldr s31, [%[inptr0], x16]\n"
+    "fmla v4.4s, v24.4s, v16.4s\n"
+    "ldr s28, [x13]\n"
+    "ldr s18, [x22, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x25, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x17]\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "prfm pldl1keep, [x25, x17]\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "prfm pldl1keep, [x13, #64]\n"
+    "prfm pldl1keep, [x22, x17]\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v1.4s, v24.4s, v15.4s\n"
+    "ldr s24, [x25, x16]\n"
+    "fmla v4.4s, v22.4s, v0.4s\n"
+    "ldr s29, [%[inptr0], x26]\n"
+    "fmla v3.4s, v22.4s, v15.4s\n"
+    "ldr s30, [x24]\n"
+    "fmla v1.4s, v21.4s, v16.4s\n"
+    "ldr s25, [x13, %[input_col_stride1]]\n"
+    "fmla v4.4s, v21.4s, v11.4s\n"
+    "prfm pldl1keep, [x25, x23]\n"
+    "fmla v7.4s, v21.4s, v15.4s\n"
+    "ldr s26, [x22, x16]\n"
+    "fmla v1.4s, v19.4s, v0.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v4.4s, v19.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v3.4s, v19.4s, v16.4s\n"
+    "prfm pldl1keep, [x13, x17]\n"
+    "fmla v9.4s, v19.4s, v15.4s\n"
+    "ldr s23, [x25, x26]\n"
+    "fmla v4.4s, v31.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x23]\n"
+    "fmla v3.4s, v31.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x20]\n"
+    "fmla v2.4s, v31.4s, v15.4s\n"
+    "ldr s20, [%[inptr0], x18]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "ldr s28, [x24, %[input_col_stride1]]\n"
+    "fmla v4.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x17]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "prfm pldl1keep, [x13, x23]\n"
+    "fmla v3.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x22, x20]\n"
+    "fmla v7.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x15]\n"
+    "fmla v9.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x23]\n"
+    "fmla v6.4s, v18.4s, v15.4s\n"
+    "ldr s27, [x13, x16]\n"
+    "fmla v4.4s, v24.4s, v17.4s\n"
+    "prfm pldl1keep, [x13, x20]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x15]\n"
+    "fmla v3.4s, v24.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, x20]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "prfm pldl1keep, [x13, x15]\n"
+    "fmla v2.4s, v24.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v8.4s, v24.4s, v15.4s\n"
+    "ldr s24, [x22, x26]\n"
+    "fmla v3.4s, v29.4s, v13.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v2.4s, v29.4s, v0.4s\n"
+    "ldr s22, [x25, x18]\n"
+    "fmla v7.4s, v30.4s, v11.4s\n"
+    "ldr s21, [x24, x16]\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v9.4s, v25.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v7.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v6.4s, v25.4s, v16.4s\n"
+    "ldr s19, [x13, x26]\n"
+    "fmla v4.4s, v26.4s, v14.4s\n"
+    "prfm pldl1keep, [%[inptr0], x17]\n"
+    "fmla v1.4s, v26.4s, v17.4s\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "fmla v3.4s, v26.4s, v10.4s\n"
+    "add x25, x25, #4\n"
+    "fmla v7.4s, v26.4s, v13.4s\n"
+    "prfm pldl1keep, [x25, #64]\n"
+    "fmla v9.4s, v26.4s, v12.4s\n"
+    "prfm pldl1keep, [x25, x17]\n"
+    "fmla v2.4s, v26.4s, v11.4s\n"
+    "subs x27, x27, #1\n"
+    "fmla v6.4s, v26.4s, v0.4s\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "fmla v5.4s, v26.4s, v15.4s\n"
+    "ldr s26, [x22, x18]\n"
+    "fmla v3.4s, v23.4s, v17.4s\n"
+    "ldr s18, [x24, x26]\n"
+    "fmla v9.4s, v23.4s, v13.4s\n"
+    "add x22, x22, #4\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v8.4s, v23.4s, v0.4s\n"
+    "ldr s23, [x13, x18]\n"
+    "fmla v7.4s, v28.4s, v10.4s\n"
+    "prfm pldl1keep, [x22, x17]\n"
+    "fmla v2.4s, v20.4s, v13.4s\n"
+    "ldr s25, [x24, x18]\n"
+    "fmla v6.4s, v28.4s, v11.4s\n"
+    "ldr s20, [%[wbptr]]\n"
+    "fmla v1.4s, v27.4s, v14.4s\n"
+    "add x13, x13, #4\n"
+    "fmla v7.4s, v27.4s, v17.4s\n"
+    "prfm pldl1keep, [x13, #64]\n"
+    "fmla v9.4s, v27.4s, v10.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v6.4s, v27.4s, v12.4s\n"
+    "fmla v8.4s, v27.4s, v11.4s\n"
+    "fmla v5.4s, v27.4s, v16.4s\n"
+    "ldr s15, [%[wbptr], #4]\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "ldr s27, [%[inptr0]]\n"
+    "fmla v9.4s, v24.4s, v17.4s\n"
+    "fmla v2.4s, v24.4s, v10.4s\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "fmla v8.4s, v24.4s, v12.4s\n"
+    "fmla v5.4s, v24.4s, v0.4s\n"
+    "ldr s16, [%[wbptr], #16]\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "ldr s24, [x25]\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "ldr s22, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v7.4s, v21.4s, v14.4s\n"
+    "fmla v6.4s, v21.4s, v10.4s\n"
+    "fmla v5.4s, v21.4s, v11.4s\n"
+    "ldr s0, [%[wbptr], #8]\n"
+    "fmla v9.4s, v19.4s, v14.4s\n"
+    "ldr s21, [x22]\n"
+    "fmla v6.4s, v19.4s, v17.4s\n"
+    "fmla v8.4s, v19.4s, v10.4s\n"
+    "fmla v5.4s, v19.4s, v12.4s\n"
+    "ldr s11, [%[wbptr], #28]\n"
+    "fmla v2.4s, v26.4s, v14.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v8.4s, v26.4s, v17.4s\n"
+    "fmla v6.4s, v18.4s, v14.4s\n"
+    "fmla v5.4s, v26.4s, v13.4s\n"
+    "ldr s12, [%[wbptr], #20]\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "ldr s19, [x25, %[input_col_stride1]]\n"
+    "fmla v8.4s, v23.4s, v14.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "str s4, [%[outptr0]]\n"
+    "fmla v5.4s, v18.4s, v10.4s\n"
+    "str s3, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "ldr s13, [%[wbptr], #12]\n"
+    "str s2, [%[outptr0], x19]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "str s1, [x21]\n"
+    "fmax v9.4s, v9.4s, v29.4s\n"
+    "fmax v8.4s, v8.4s, v29.4s\n"
+    "ldr s10, [%[wbptr], #32]\n"
+    "str s9, [x21, %[output_col_stride1]]\n"
+    "fmla v5.4s, v25.4s, v14.4s\n"
+    "str s8, [x21, x19]\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "ldr s17, [%[wbptr], #24]\n"
+    "str s7, [x14]\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "str s6, [x14, %[output_col_stride1]]\n"
+    "mov v4.16b, v20.16b\n"
+    "str s5, [x14, x19]\n"
+    "mov v1.16b, v20.16b\n"
+    "mov v3.16b, v20.16b\n"
+    "ldr s14, [%[wbptr], #36]\n"
+    "mov v7.16b, v20.16b\n"
+    "ldr s31, [%[inptr0], x16]\n"
+    "mov v9.16b, v20.16b\n"
+    "ldr s28, [x13]\n"
+    "mov v2.16b, v20.16b\n"
+    "ldr s18, [x22, %[input_col_stride1]]\n"
+    "mov v6.16b, v20.16b\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "mov v8.16b, v20.16b\n"
+    "add x21, x21, #4\n"
+    "mov v5.16b, v20.16b\n"
+    "add x14, x14, #4\n"
+    "fmla v4.4s, v27.4s, v15.4s\n"
+    "fmla v4.4s, v24.4s, v16.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v1.4s, v24.4s, v15.4s\n"
+    "ldr s24, [x25, x16]\n"
+    "fmla v4.4s, v22.4s, v0.4s\n"
+    "ldr s29, [%[inptr0], x26]\n"
+    "fmla v3.4s, v22.4s, v15.4s\n"
+    "ldr s30, [x24]\n"
+    "fmla v1.4s, v21.4s, v16.4s\n"
+    "ldr s25, [x13, %[input_col_stride1]]\n"
+    "fmla v4.4s, v21.4s, v11.4s\n"
+    "prfm pldl1keep, [x25, x23]\n"
+    "fmla v7.4s, v21.4s, v15.4s\n"
+    "ldr s26, [x22, x16]\n"
+    "fmla v1.4s, v19.4s, v0.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v4.4s, v19.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v3.4s, v19.4s, v16.4s\n"
+    "prfm pldl1keep, [x13, x17]\n"
+    "fmla v9.4s, v19.4s, v15.4s\n"
+    "ldr s23, [x25, x26]\n"
+    "fmla v4.4s, v31.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x23]\n"
+    "fmla v3.4s, v31.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x20]\n"
+    "fmla v2.4s, v31.4s, v15.4s\n"
+    "ldr s20, [%[inptr0], x18]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "ldr s28, [x24, %[input_col_stride1]]\n"
+    "fmla v4.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x17]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "prfm pldl1keep, [x13, x23]\n"
+    "fmla v3.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x22, x20]\n"
+    "fmla v7.4s, v18.4s, v0.4s\n"
+    "prfm pldl1keep, [x25, x15]\n"
+    "fmla v9.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x23]\n"
+    "fmla v6.4s, v18.4s, v15.4s\n"
+    "ldr s27, [x13, x16]\n"
+    "fmla v4.4s, v24.4s, v17.4s\n"
+    "prfm pldl1keep, [x13, x20]\n"
+    "fmla v1.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x22, x15]\n"
+    "fmla v3.4s, v24.4s, v12.4s\n"
+    "prfm pldl1keep, [x24, x20]\n"
+    "fmla v9.4s, v24.4s, v0.4s\n"
+    "prfm pldl1keep, [x13, x15]\n"
+    "fmla v2.4s, v24.4s, v16.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v8.4s, v24.4s, v15.4s\n"
+    "ldr s24, [x22, x26]\n"
+    "fmla v3.4s, v29.4s, v13.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v2.4s, v29.4s, v0.4s\n"
+    "ldr s22, [x25, x18]\n"
+    "fmla v7.4s, v30.4s, v11.4s\n"
+    "ldr s21, [x24, x16]\n"
+    "fmla v1.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v9.4s, v25.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v7.4s, v25.4s, v12.4s\n"
+    "add x25, x25, #4\n"
+    "fmla v6.4s, v25.4s, v16.4s\n"
+    "ldr s19, [x13, x26]\n"
+    "fmla v4.4s, v26.4s, v14.4s\n"
+    "fmla v1.4s, v26.4s, v17.4s\n"
+    "fmla v3.4s, v26.4s, v10.4s\n"
+    "fmla v7.4s, v26.4s, v13.4s\n"
+    "fmla v9.4s, v26.4s, v12.4s\n"
+    "fmla v2.4s, v26.4s, v11.4s\n"
+    "fmla v6.4s, v26.4s, v0.4s\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "fmla v5.4s, v26.4s, v15.4s\n"
+    "ldr s26, [x22, x18]\n"
+    "fmla v3.4s, v23.4s, v17.4s\n"
+    "ldr s18, [x24, x26]\n"
+    "fmla v9.4s, v23.4s, v13.4s\n"
+    "add x22, x22, #4\n"
+    "fmla v2.4s, v23.4s, v12.4s\n"
+    "fmla v8.4s, v23.4s, v0.4s\n"
+    "fmla v7.4s, v28.4s, v10.4s\n"
+    "ldr s23, [x13, x18]\n"
+    "fmla v6.4s, v28.4s, v11.4s\n"
+    "ldr s25, [x24, x18]\n"
+    "fmla v2.4s, v20.4s, v13.4s\n"
+    "add x13, x13, #4\n"
+    "fmla v1.4s, v27.4s, v14.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v7.4s, v27.4s, v17.4s\n"
+    "fmla v9.4s, v27.4s, v10.4s\n"
+    "fmla v6.4s, v27.4s, v12.4s\n"
+    "fmla v8.4s, v27.4s, v11.4s\n"
+    "fmla v5.4s, v27.4s, v16.4s\n"
+    "fmla v3.4s, v24.4s, v14.4s\n"
+    "fmla v9.4s, v24.4s, v17.4s\n"
+    "fmla v2.4s, v24.4s, v10.4s\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "fmla v8.4s, v24.4s, v12.4s\n"
+    "fmla v5.4s, v24.4s, v0.4s\n"
+    "fmla v7.4s, v21.4s, v14.4s\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "fmla v9.4s, v19.4s, v14.4s\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "fmla v6.4s, v21.4s, v10.4s\n"
+    "fmla v5.4s, v21.4s, v11.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v2.4s, v26.4s, v14.4s\n"
+    "fmla v6.4s, v19.4s, v17.4s\n"
+    "fmla v8.4s, v19.4s, v10.4s\n"
+    "fmla v5.4s, v19.4s, v12.4s\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "fmla v6.4s, v18.4s, v14.4s\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "str s4, [%[outptr0]]\n"
+    "fmla v8.4s, v26.4s, v17.4s\n"
+    "str s3, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v5.4s, v26.4s, v13.4s\n"
+    "str s2, [%[outptr0], x19]\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "fmla v8.4s, v23.4s, v14.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "str s1, [x21]\n"
+    "fmla v5.4s, v18.4s, v10.4s\n"
+    "fmax v9.4s, v9.4s, v29.4s\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmax v8.4s, v8.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "str s9, [x21, %[output_col_stride1]]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "str s8, [x21, x19]\n"
+    "str s7, [x14]\n"
+    "str s6, [x14, %[output_col_stride1]]\n"
+    "add x21, x21, #4\n"
+    "fmla v5.4s, v25.4s, v14.4s\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "str s5, [x14, x19]\n"
+    "add x14, x14, #4\n"
+    "7:\n"
+    : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
+}
 
-    asm volatile (
-        "qU22B .req q0\n" "qU23B .req q0\n" "qW22A .req q0\n"
-        "vU22B .req v0\n" "vU23B .req v0\n" "vW22A .req v0\n"
-        "qV12A .req q1\n" "qW11B .req q1\n"
-        "vV12A .req v1\n" "vW11B .req v1\n"
-        "qU41A .req q2\n" "qU32B .req q2\n" "qU33A .req q2\n" "qV13B .req q2\n"
-        "vU41A .req v2\n" "vU32B .req v2\n" "vU33A .req v2\n" "vV13B .req v2\n"
-        "qU42B .req q3\n" "qU13B .req q3\n" "qU44B .req q3\n" "qU55A .req q3\n"
-        "vU42B .req v3\n" "vU13B .req v3\n" "vU44B .req v3\n" "vU55A .req v3\n"
-        "qU34B .req q4\n" "qU15A .req q4\n" "qU42A .req q4\n" "qU44A .req q4\n" "qU12B .req q4\n"
-        "vU34B .req v4\n" "vU15A .req v4\n" "vU42A .req v4\n" "vU44A .req v4\n" "vU12B .req v4\n"
-        "qU33B .req q5\n" "qU52A .req q5\n" "qW23A .req q5\n"
-        "vU33B .req v5\n" "vU52A .req v5\n" "vW23A .req v5\n"
-        "qV31A .req q6\n" "qU13A .req q6\n" "qV12B .req q6\n"
-        "vV31A .req v6\n" "vU13A .req v6\n" "vV12B .req v6\n"
-        "qU35B .req q7\n" "qU51B .req q7\n" "qV11A .req q7\n" "qU53B .req q7\n"
-        "vU35B .req v7\n" "vU51B .req v7\n" "vV11A .req v7\n" "vU53B .req v7\n"
-        "qW21A .req q8\n" "qV22B .req q8\n"
-        "vW21A .req v8\n" "vV22B .req v8\n"
-        "qV33B .req q9\n" "qU14A .req q9\n" "qV23A .req q9\n" "qU25B .req q9\n"
-        "vV33B .req v9\n" "vU14A .req v9\n" "vV23A .req v9\n" "vU25B .req v9\n"
-        "qW21B .req q10\n" "qV32A .req q10\n" "qU35A .req q10\n"
-        "vW21B .req v10\n" "vV32A .req v10\n" "vU35A .req v10\n"
-        "qV11B .req q11\n" "qU15B .req q11\n" "qV33A .req q11\n"
-        "vV11B .req v11\n" "vU15B .req v11\n" "vV33A .req v11\n"
-        "qU11B .req q12\n" "qW23B .req q12\n" "qU45A .req q12\n"
-        "vU11B .req v12\n" "vW23B .req v12\n" "vU45A .req v12\n"
-        "qW11A .req q13\n" "qU45B .req q13\n" "qU52B .req q13\n"
-        "vW11A .req v13\n" "vU45B .req v13\n" "vU52B .req v13\n"
-        "qU55B .req q14\n" "qU25A .req q14\n" "qV21A .req q14\n"
-        "vU55B .req v14\n" "vU25A .req v14\n" "vV21A .req v14\n"
-        "qU53A .req q15\n" "qV21B .req q15\n" "qU31A .req q15\n"
-        "vU53A .req v15\n" "vV21B .req v15\n" "vU31A .req v15\n"
-        "qW13B .req q16\n" "qU23A .req q16\n"
-        "vW13B .req v16\n" "vU23A .req v16\n"
-        "qW33B .req q17\n" "qW33A .req q17\n"
-        "vW33B .req v17\n" "vW33A .req v17\n"
-        "qU24B .req q18\n" "qU32A .req q18\n" "qV31B .req q18\n" "qV13A .req q18\n"
-        "vU24B .req v18\n" "vU32A .req v18\n" "vV31B .req v18\n" "vV13A .req v18\n"
-        "qU31B .req q19\n" "qU11A .req q19\n" "qU54B .req q19\n" "qU43A .req q19\n"
-        "vU31B .req v19\n" "vU11A .req v19\n" "vU54B .req v19\n" "vU43A .req v19\n"
-        "qU24A .req q20\n" "qW12B .req q20\n" "qU54A .req q20\n"
-        "vU24A .req v20\n" "vW12B .req v20\n" "vU54A .req v20\n"
-        "qV23B .req q21\n" "qW12A .req q21\n"
-        "vV23B .req v21\n" "vW12A .req v21\n"
-        "qW32A .req q22\n" "qU43B .req q22\n"
-        "vW32A .req v22\n" "vU43B .req v22\n"
-        "qW31A .req q23\n" "qV32B .req q23\n"
-        "vW31A .req v23\n" "vV32B .req v23\n"
-        "qU22A .req q24\n" "qW31B .req q24\n"
-        "vU22A .req v24\n" "vW31B .req v24\n"
-        "qU21B .req q25\n" "qV22A .req q25\n"
-        "vU21B .req v25\n" "vV22A .req v25\n"
-        "qU34A .req q26\n" "qW22B .req q26\n" "qU12A .req q26\n"
-        "vU34A .req v26\n" "vW22B .req v26\n" "vU12A .req v26\n"
-        "qW13A .req q27\n" "qU51A .req q27\n"
-        "vW13A .req v27\n" "vU51A .req v27\n"
-        "qW32B .req q28\n"
-        "vW32B .req v28\n"
-        "qU41B .req q29\n" "qU14B .req q29\n"
-        "vU41B .req v29\n" "vU14B .req v29\n"
-        "qU21A .req q30\n"
-        "vU21A .req v30\n"
-
-        "uptr1 .req x0\n"
-        "uptr2 .req x1\n"
-        "uptr3 .req x2\n"
-        "uptr4 .req x3\n"
-
-        "u_col_stride1 .req %x[u_col_stride]\n"
-        "u_col_stride2 .req x4\n"
-        "u_col_stride3 .req x5\n"
-        "u_col_stride4 .req x6\n"
-
-        "wptr1 .req x7\n"
-        "wptr2 .req x8\n"
-        "w_col_stride1 .req %x[w_col_stride]\n"
-        "w_col_stride2 .req x9\n"
-
-        "vptr1 .req x10\n"
-        "vptr2 .req x11\n"
-        "v_col_stride1 .req %x[v_col_stride]\n"
-        "v_col_stride2 .req x12\n"
-
-        // Prepare strides and pointers
-        "add uptr1, %x[uptr0], %x[u_row_stride]\n"
-        "add uptr2,    uptr1 , %x[u_row_stride]\n"
-        "add uptr3,    uptr2 , %x[u_row_stride]\n"
-        "add uptr4,    uptr3 , %x[u_row_stride]\n"
-        "add u_col_stride2, u_col_stride1, u_col_stride1\n"
-        "add u_col_stride3, u_col_stride2, u_col_stride1\n"
-        "add u_col_stride4, u_col_stride3, u_col_stride1\n"
-
-        "add wptr1, %x[wptr0], %x[w_row_stride]\n"
-        "add wptr2,    wptr1 , %x[w_row_stride]\n"
-        "add w_col_stride2, w_col_stride1, w_col_stride1\n"
-
-        "add vptr1, %x[vptr0], %x[v_row_stride]\n"
-        "add vptr2,    vptr1 , %x[v_row_stride]\n"
-        "add v_col_stride2, v_col_stride1, v_col_stride1\n"
-
-        // Pre-load for A
-        "ldr qW13A, [%x[wptr0], w_col_stride2]\n"
-        "ldr qW23A, [wptr1, w_col_stride2]\n"
-        "ldr qW33A, [wptr2, w_col_stride2]\n"
-        "ldr qW12A, [%x[wptr0], w_col_stride1]\n"
-        "ldr qU15A, [%x[uptr0], u_col_stride4]\n"
-        "ldr qW22A, [wptr1, w_col_stride1]\n"
-        "ldr qU14A, [%x[uptr0], u_col_stride3]\n"
-        "ldr qW32A, [wptr2, w_col_stride1]\n"
-        "ldr qU13A, [%x[uptr0], u_col_stride2]\n"
-        "ldr qU25A, [uptr1, u_col_stride4]\n"
-        "ldr qU24A, [uptr1, u_col_stride3]\n"
-        "ldr qW11A, [%x[wptr0]], #0x10\n"
-        "ldr qU23A, [uptr1, u_col_stride2]\n"
-        "ldr qW21A, [wptr1], #0x10\n"
-        "ldr qW31A, [wptr2], #0x10\n"
-        "ldr qU34A, [uptr2, u_col_stride3]\n"
-        "ldr qU35A, [uptr2, u_col_stride4]\n"
-
-        // First part of A
-        "fmul vV13A.4s, vU15A.4s, vW13A.4s\n"
-        "ldr qU33A, [uptr2, u_col_stride2]\n"
-        "fmul vV12A.4s, vU14A.4s, vW13A.4s\n"
-        "cbz %x[n_iters], 2f\n"  // Jump to tail if not looping
-
-        "1:"  // Main loop, double unrolled
-        // A Part
-        "fmla vV13A.4s, vU14A.4s, vW12A.4s\n"
-        "ldr qU45A, [uptr3, u_col_stride4]\n"
-        "fmul vV11A.4s, vU13A.4s, vW13A.4s\n"
-        "fmla vV12A.4s, vU13A.4s, vW12A.4s\n"
-        "fmla vV13A.4s, vU13A.4s, vW11A.4s\n"
-        "ldr qU44A, [uptr3, u_col_stride3]\n"
-        "fmla vV13A.4s, vU25A.4s, vW23A.4s\n"
-        "fmul vV23A.4s, vU25A.4s, vW13A.4s\n"
-        "ldr qU43A, [uptr3, u_col_stride2]\n"
-        "fmla vV12A.4s, vU24A.4s, vW23A.4s\n"
-        "fmla vV13A.4s, vU24A.4s, vW22A.4s\n"
-        "fmul vV22A.4s, vU24A.4s, vW13A.4s\n"
-        "fmla vV23A.4s, vU24A.4s, vW12A.4s\n"
-        "ldr qU55A, [uptr4, u_col_stride4]\n"
-        "fmla vV11A.4s, vU23A.4s, vW23A.4s\n"
-        "fmla vV12A.4s, vU23A.4s, vW22A.4s\n"
-        "fmla vV13A.4s, vU23A.4s, vW21A.4s\n"
-        "fmul vV21A.4s, vU23A.4s, vW13A.4s\n"
-        "fmla vV22A.4s, vU23A.4s, vW12A.4s\n"
-        "fmla vV23A.4s, vU23A.4s, vW11A.4s\n"
-        "ldr qU54A, [uptr4, u_col_stride3]\n"
-        "fmla vV13A.4s, vU35A.4s, vW33A.4s\n"
-        "fmla vV23A.4s, vU35A.4s, vW23A.4s\n"
-        "fmul vV33A.4s, vU35A.4s, vW13A.4s\n"
-        "ldr qU53A, [uptr4, u_col_stride2]\n"
-        "fmla vV12A.4s, vU34A.4s, vW33A.4s\n"
-        "fmla vV13A.4s, vU34A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU34A.4s, vW23A.4s\n"
-        "fmla vV23A.4s, vU34A.4s, vW22A.4s\n"
-        "fmul vV32A.4s, vU34A.4s, vW13A.4s\n"
-        "fmla vV33A.4s, vU34A.4s, vW12A.4s\n"
-        "ldr qU12A, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV11A.4s, vU33A.4s, vW33A.4s\n"
-        "fmla vV12A.4s, vU33A.4s, vW32A.4s\n"
-        "fmla vV13A.4s, vU33A.4s, vW31A.4s\n"
-        "str qV13A, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21A.4s, vU33A.4s, vW23A.4s\n"
-        "fmla vV22A.4s, vU33A.4s, vW22A.4s\n"
-        "fmla vV23A.4s, vU33A.4s, vW21A.4s\n"
-        "fmul vV31A.4s, vU33A.4s, vW13A.4s\n"
-        "ldr qW13B, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV32A.4s, vU33A.4s, vW12A.4s\n"
-        "fmla vV33A.4s, vU33A.4s, vW11A.4s\n"
-        "ldr qU22A, [uptr1, u_col_stride1]\n"
-        "fmla vV23A.4s, vU45A.4s, vW33A.4s\n"
-        "fmla vV33A.4s, vU45A.4s, vW23A.4s\n"
-        "ldr qU32A, [uptr2, u_col_stride1]\n"
-        "fmla vV22A.4s, vU44A.4s, vW33A.4s\n"
-        "fmla vV23A.4s, vU44A.4s, vW32A.4s\n"
-        "fmla vV32A.4s, vU44A.4s, vW23A.4s\n"
-        "fmla vV33A.4s, vU44A.4s, vW22A.4s\n"
-        "ldr qU42A, [uptr3, u_col_stride1]\n"
-        "fmla vV21A.4s, vU43A.4s, vW33A.4s\n"
-        "fmla vV22A.4s, vU43A.4s, vW32A.4s\n"
-        "fmla vV23A.4s, vU43A.4s, vW31A.4s\n"
-        "str qV23A, [vptr1, v_col_stride2]\n"
-        "fmla vV31A.4s, vU43A.4s, vW23A.4s\n"
-        "ldr qW23B, [wptr1, w_col_stride2]\n"
-        "fmla vV32A.4s, vU43A.4s, vW22A.4s\n"
-        "fmla vV33A.4s, vU43A.4s, vW21A.4s\n"
-        "ldr qU52A, [uptr4, u_col_stride1]\n"
-        "fmla vV33A.4s, vU55A.4s, vW33A.4s\n"
-        "ldr qU11A, [%x[uptr0]], #0x10\n"
-        "fmla vV32A.4s, vU54A.4s, vW33A.4s\n"
-        "fmla vV33A.4s, vU54A.4s, vW32A.4s\n"
-        "ldr qU21A, [uptr1], #0x10\n"
-        "fmla vV31A.4s, vU53A.4s, vW33A.4s\n"
-        "ldr qW33B, [wptr2, w_col_stride2]\n"
-        "fmla vV32A.4s, vU53A.4s, vW32A.4s\n"
-        "fmla vV33A.4s, vU53A.4s, vW31A.4s\n"
-        "str qV33A, [vptr2, v_col_stride2]\n"
-        "fmla vV11A.4s, vU12A.4s, vW12A.4s\n"
-        "ldr qU31A, [uptr2], #0x10\n"
-        "fmla vV12A.4s, vU12A.4s, vW11A.4s\n"
-        "ldr qU41A, [uptr3], #0x10\n"
-        "fmla vV11A.4s, vU22A.4s, vW22A.4s\n"
-        "ldr qU51A, [uptr4], #0x10\n"
-        "fmla vV12A.4s, vU22A.4s, vW21A.4s\n"
-        "ldr qW12B, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV21A.4s, vU22A.4s, vW12A.4s\n"
-        "ldr qU15B, [%x[uptr0], u_col_stride4]\n"
-        "fmla vV22A.4s, vU22A.4s, vW11A.4s\n"
-        "ldr qW22B, [wptr1, w_col_stride1]\n"
-        "fmla vV11A.4s, vU32A.4s, vW32A.4s\n"
-        "ldr qU14B, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV12A.4s, vU32A.4s, vW31A.4s\n"
-        "str qV12A, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV21A.4s, vU32A.4s, vW22A.4s\n"
-        "ldr qW32B, [wptr2, w_col_stride1]\n"
-        "fmla vV22A.4s, vU32A.4s, vW21A.4s\n"
-        "ldr qU13B, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV31A.4s, vU32A.4s, vW12A.4s\n"
-        "ldr qU25B, [uptr1, u_col_stride4]\n"
-        "fmla vV32A.4s, vU32A.4s, vW11A.4s\n"
-        "ldr qU24B, [uptr1, u_col_stride3]\n"
-        "fmla vV21A.4s, vU42A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU42A.4s, vW31A.4s\n"
-        "str qV22A, [vptr1, v_col_stride1]\n"
-        "fmla vV31A.4s, vU42A.4s, vW22A.4s\n"
-        "fmla vV32A.4s, vU42A.4s, vW21A.4s\n"
-        "fmla vV31A.4s, vU52A.4s, vW32A.4s\n"
-        "fmla vV32A.4s, vU52A.4s, vW31A.4s\n"
-        "str qV32A, [vptr2, v_col_stride1]\n"
-        "fmla vV11A.4s, vU11A.4s, vW11A.4s\n"
-        "ldr qW11B, [%x[wptr0]], #0x10\n"
-        "fmla vV11A.4s, vU21A.4s, vW21A.4s\n"
-        "ldr qU23B, [uptr1, u_col_stride2]\n"
-        "fmla vV21A.4s, vU21A.4s, vW11A.4s\n"
-        "ldr qW21B, [wptr1], #0x10\n"
-        "fmla vV11A.4s, vU31A.4s, vW31A.4s\n"
-        "str qV11A, [%x[vptr0]], #0x10\n"
-        "fmla vV21A.4s, vU31A.4s, vW21A.4s\n"
-        "ldr qW31B, [wptr2], #0x10\n"
-        "fmla vV31A.4s, vU31A.4s, vW11A.4s\n"
-        "ldr qU34B, [uptr2, u_col_stride3]\n"
-        "fmla vV21A.4s, vU41A.4s, vW31A.4s\n"
-        "str qV21A, [vptr1], #0x10\n"
-        "fmla vV31A.4s, vU41A.4s, vW21A.4s\n"
-        "ldr qU35B, [uptr2, u_col_stride4]\n"
-        "fmla vV31A.4s, vU51A.4s, vW31A.4s\n"
-        "str qV31A, [vptr2], #0x10\n"
-
-        // B Part
-        "fmul vV13B.4s, vU15B.4s, vW13B.4s\n"
-        "ldr qU33B, [uptr2, u_col_stride2]\n"
-        "fmul vV12B.4s, vU14B.4s, vW13B.4s\n"
-        "fmla vV13B.4s, vU14B.4s, vW12B.4s\n"
-        "ldr qU45B, [uptr3, u_col_stride4]\n"
-        "fmul vV11B.4s, vU13B.4s, vW13B.4s\n"
-        "fmla vV12B.4s, vU13B.4s, vW12B.4s\n"
-        "fmla vV13B.4s, vU13B.4s, vW11B.4s\n"
-        "ldr qU44B, [uptr3, u_col_stride3]\n"
-        "fmla vV13B.4s, vU25B.4s, vW23B.4s\n"
-        "fmul vV23B.4s, vU25B.4s, vW13B.4s\n"
-        "ldr qU43B, [uptr3, u_col_stride2]\n"
-        "fmla vV12B.4s, vU24B.4s, vW23B.4s\n"
-        "fmla vV13B.4s, vU24B.4s, vW22B.4s\n"
-        "fmul vV22B.4s, vU24B.4s, vW13B.4s\n"
-        "fmla vV23B.4s, vU24B.4s, vW12B.4s\n"
-        "ldr qU55B, [uptr4, u_col_stride4]\n"
-        "fmla vV11B.4s, vU23B.4s, vW23B.4s\n"
-        "fmla vV12B.4s, vU23B.4s, vW22B.4s\n"
-        "fmla vV13B.4s, vU23B.4s, vW21B.4s\n"
-        "fmul vV21B.4s, vU23B.4s, vW13B.4s\n"
-        "fmla vV22B.4s, vU23B.4s, vW12B.4s\n"
-        "fmla vV23B.4s, vU23B.4s, vW11B.4s\n"
-        "ldr qU54B, [uptr4, u_col_stride3]\n"
-        "fmla vV13B.4s, vU35B.4s, vW33B.4s\n"
-        "fmla vV23B.4s, vU35B.4s, vW23B.4s\n"
-        "fmul vV33B.4s, vU35B.4s, vW13B.4s\n"
-        "ldr qU53B, [uptr4, u_col_stride2]\n"
-        "fmla vV12B.4s, vU34B.4s, vW33B.4s\n"
-        "fmla vV13B.4s, vU34B.4s, vW32B.4s\n"
-        "fmla vV22B.4s, vU34B.4s, vW23B.4s\n"
-        "fmla vV23B.4s, vU34B.4s, vW22B.4s\n"
-        "fmul vV32B.4s, vU34B.4s, vW13B.4s\n"
-        "fmla vV33B.4s, vU34B.4s, vW12B.4s\n"
-        "ldr qU12B, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV11B.4s, vU33B.4s, vW33B.4s\n"
-        "fmla vV12B.4s, vU33B.4s, vW32B.4s\n"
-        "fmla vV13B.4s, vU33B.4s, vW31B.4s\n"
-        "str qV13B, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21B.4s, vU33B.4s, vW23B.4s\n"
-        "fmla vV22B.4s, vU33B.4s, vW22B.4s\n"
-        "fmla vV23B.4s, vU33B.4s, vW21B.4s\n"
-        "fmul vV31B.4s, vU33B.4s, vW13B.4s\n"
-        "ldr qW13A, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV32B.4s, vU33B.4s, vW12B.4s\n"
-        "fmla vV33B.4s, vU33B.4s, vW11B.4s\n"
-        "ldr qU22B, [uptr1, u_col_stride1]\n"
-        "fmla vV23B.4s, vU45B.4s, vW33B.4s\n"
-        "fmla vV33B.4s, vU45B.4s, vW23B.4s\n"
-        "ldr qU32B, [uptr2, u_col_stride1]\n"
-        "fmla vV22B.4s, vU44B.4s, vW33B.4s\n"
-        "fmla vV23B.4s, vU44B.4s, vW32B.4s\n"
-        "fmla vV32B.4s, vU44B.4s, vW23B.4s\n"
-        "fmla vV33B.4s, vU44B.4s, vW22B.4s\n"
-        "ldr qU42B, [uptr3, u_col_stride1]\n"
-        "fmla vV21B.4s, vU43B.4s, vW33B.4s\n"
-        "fmla vV22B.4s, vU43B.4s, vW32B.4s\n"
-        "fmla vV23B.4s, vU43B.4s, vW31B.4s\n"
-        "str qV23B, [vptr1, v_col_stride2]\n"
-        "fmla vV31B.4s, vU43B.4s, vW23B.4s\n"
-        "ldr qW23A, [wptr1, w_col_stride2]\n"
-        "fmla vV32B.4s, vU43B.4s, vW22B.4s\n"
-        "fmla vV33B.4s, vU43B.4s, vW21B.4s\n"
-        "ldr qU52B, [uptr4, u_col_stride1]\n"
-        "fmla vV33B.4s, vU55B.4s, vW33B.4s\n"
-        "ldr qU11B, [%x[uptr0]], #0x10\n"
-        "fmla vV32B.4s, vU54B.4s, vW33B.4s\n"
-        "fmla vV33B.4s, vU54B.4s, vW32B.4s\n"
-        "ldr qU21B, [uptr1], #0x10\n"
-        "fmla vV31B.4s, vU53B.4s, vW33B.4s\n"
-        "ldr qW33A, [wptr2, w_col_stride2]\n"
-        "fmla vV32B.4s, vU53B.4s, vW32B.4s\n"
-        "fmla vV33B.4s, vU53B.4s, vW31B.4s\n"
-        "str qV33B, [vptr2, v_col_stride2]\n"
-        "fmla vV11B.4s, vU12B.4s, vW12B.4s\n"
-        "ldr qU31B, [uptr2], #0x10\n"
-        "fmla vV12B.4s, vU12B.4s, vW11B.4s\n"
-        "ldr qU41B, [uptr3], #0x10\n"
-        "fmla vV11B.4s, vU22B.4s, vW22B.4s\n"
-        "ldr qU51B, [uptr4], #0x10\n"
-        "fmla vV12B.4s, vU22B.4s, vW21B.4s\n"
-        "ldr qW12A, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV21B.4s, vU22B.4s, vW12B.4s\n"
-        "ldr qU15A, [%x[uptr0], u_col_stride4]\n"
-        "fmla vV22B.4s, vU22B.4s, vW11B.4s\n"
-        "ldr qW22A, [wptr1, w_col_stride1]\n"
-        "fmla vV11B.4s, vU32B.4s, vW32B.4s\n"
-        "ldr qU14A, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV12B.4s, vU32B.4s, vW31B.4s\n"
-        "str qV12B, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV21B.4s, vU32B.4s, vW22B.4s\n"
-        "ldr qW32A, [wptr2, w_col_stride1]\n"
-        "fmla vV22B.4s, vU32B.4s, vW21B.4s\n"
-        "ldr qU13A, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV31B.4s, vU32B.4s, vW12B.4s\n"
-        "ldr qU25A, [uptr1, u_col_stride4]\n"
-        "fmla vV32B.4s, vU32B.4s, vW11B.4s\n"
-        "ldr qU24A, [uptr1, u_col_stride3]\n"
-        "fmla vV21B.4s, vU42B.4s, vW32B.4s\n"
-        "fmla vV22B.4s, vU42B.4s, vW31B.4s\n"
-        "str qV22B, [vptr1, v_col_stride1]\n"
-        "fmla vV31B.4s, vU42B.4s, vW22B.4s\n"
-        "fmla vV32B.4s, vU42B.4s, vW21B.4s\n"
-        "fmla vV31B.4s, vU52B.4s, vW32B.4s\n"
-        "subs %x[n_iters], %x[n_iters], #1\n"
-        "fmla vV32B.4s, vU52B.4s, vW31B.4s\n"
-        "str qV32B, [vptr2, v_col_stride1]\n"
-        "fmla vV11B.4s, vU11B.4s, vW11B.4s\n"
-        "ldr qW11A, [%x[wptr0]], #0x10\n"
-        "fmla vV11B.4s, vU21B.4s, vW21B.4s\n"
-        "ldr qU23A, [uptr1, u_col_stride2]\n"
-        "fmla vV21B.4s, vU21B.4s, vW11B.4s\n"
-        "ldr qW21A, [wptr1], #0x10\n"
-        "fmla vV11B.4s, vU31B.4s, vW31B.4s\n"
-        "str qV11B, [%x[vptr0]], #0x10\n"
-        "fmla vV21B.4s, vU31B.4s, vW21B.4s\n"
-        "ldr qW31A, [wptr2], #0x10\n"
-        "fmla vV31B.4s, vU31B.4s, vW11B.4s\n"
-        "ldr qU34A, [uptr2, u_col_stride3]\n"
-        "fmla vV21B.4s, vU41B.4s, vW31B.4s\n"
-        "str qV21B, [vptr1], #0x10\n"
-        "fmla vV31B.4s, vU41B.4s, vW21B.4s\n"
-        "ldr qU35A, [uptr2, u_col_stride4]\n"
-        "fmla vV31B.4s, vU51B.4s, vW31B.4s\n"
-        "str qV31B, [vptr2], #0x10\n"
-
-        // First part of A
-        "fmul vV13A.4s, vU15A.4s, vW13A.4s\n"
-        "ldr qU33A, [uptr2, u_col_stride2]\n"
-        "fmul vV12A.4s, vU14A.4s, vW13A.4s\n"
-        "bne 1b\n"  // Loop
-
-        "2:"  // Tail dispatch
-        "cbnz %w[odd_tail], 3f\n"
-
-        // Even tail
-        // A Part
-        "fmla vV13A.4s, vU14A.4s, vW12A.4s\n"
-        "ldr qU45A, [uptr3, u_col_stride4]\n"
-        "fmul vV11A.4s, vU13A.4s, vW13A.4s\n"
-        "fmla vV12A.4s, vU13A.4s, vW12A.4s\n"
-        "fmla vV13A.4s, vU13A.4s, vW11A.4s\n"
-        "ldr qU44A, [uptr3, u_col_stride3]\n"
-        "fmla vV13A.4s, vU25A.4s, vW23A.4s\n"
-        "fmul vV23A.4s, vU25A.4s, vW13A.4s\n"
-        "ldr qU43A, [uptr3, u_col_stride2]\n"
-        "fmla vV12A.4s, vU24A.4s, vW23A.4s\n"
-        "fmla vV13A.4s, vU24A.4s, vW22A.4s\n"
-        "fmul vV22A.4s, vU24A.4s, vW13A.4s\n"
-        "fmla vV23A.4s, vU24A.4s, vW12A.4s\n"
-        "ldr qU55A, [uptr4, u_col_stride4]\n"
-        "fmla vV11A.4s, vU23A.4s, vW23A.4s\n"
-        "fmla vV12A.4s, vU23A.4s, vW22A.4s\n"
-        "fmla vV13A.4s, vU23A.4s, vW21A.4s\n"
-        "fmul vV21A.4s, vU23A.4s, vW13A.4s\n"
-        "fmla vV22A.4s, vU23A.4s, vW12A.4s\n"
-        "fmla vV23A.4s, vU23A.4s, vW11A.4s\n"
-        "ldr qU54A, [uptr4, u_col_stride3]\n"
-        "fmla vV13A.4s, vU35A.4s, vW33A.4s\n"
-        "fmla vV23A.4s, vU35A.4s, vW23A.4s\n"
-        "fmul vV33A.4s, vU35A.4s, vW13A.4s\n"
-        "ldr qU53A, [uptr4, u_col_stride2]\n"
-        "fmla vV12A.4s, vU34A.4s, vW33A.4s\n"
-        "fmla vV13A.4s, vU34A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU34A.4s, vW23A.4s\n"
-        "fmla vV23A.4s, vU34A.4s, vW22A.4s\n"
-        "fmul vV32A.4s, vU34A.4s, vW13A.4s\n"
-        "fmla vV33A.4s, vU34A.4s, vW12A.4s\n"
-        "ldr qU12A, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV11A.4s, vU33A.4s, vW33A.4s\n"
-        "fmla vV12A.4s, vU33A.4s, vW32A.4s\n"
-        "fmla vV13A.4s, vU33A.4s, vW31A.4s\n"
-        "str qV13A, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21A.4s, vU33A.4s, vW23A.4s\n"
-        "fmla vV22A.4s, vU33A.4s, vW22A.4s\n"
-        "fmla vV23A.4s, vU33A.4s, vW21A.4s\n"
-        "fmul vV31A.4s, vU33A.4s, vW13A.4s\n"
-        "ldr qW13B, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV32A.4s, vU33A.4s, vW12A.4s\n"
-        "fmla vV33A.4s, vU33A.4s, vW11A.4s\n"
-        "ldr qU22A, [uptr1, u_col_stride1]\n"
-        "fmla vV23A.4s, vU45A.4s, vW33A.4s\n"
-        "fmla vV33A.4s, vU45A.4s, vW23A.4s\n"
-        "ldr qU32A, [uptr2, u_col_stride1]\n"
-        "fmla vV22A.4s, vU44A.4s, vW33A.4s\n"
-        "fmla vV23A.4s, vU44A.4s, vW32A.4s\n"
-        "fmla vV32A.4s, vU44A.4s, vW23A.4s\n"
-        "fmla vV33A.4s, vU44A.4s, vW22A.4s\n"
-        "ldr qU42A, [uptr3, u_col_stride1]\n"
-        "fmla vV21A.4s, vU43A.4s, vW33A.4s\n"
-        "fmla vV22A.4s, vU43A.4s, vW32A.4s\n"
-        "fmla vV23A.4s, vU43A.4s, vW31A.4s\n"
-        "str qV23A, [vptr1, v_col_stride2]\n"
-        "fmla vV31A.4s, vU43A.4s, vW23A.4s\n"
-        "ldr qW23B, [wptr1, w_col_stride2]\n"
-        "fmla vV32A.4s, vU43A.4s, vW22A.4s\n"
-        "fmla vV33A.4s, vU43A.4s, vW21A.4s\n"
-        "ldr qU52A, [uptr4, u_col_stride1]\n"
-        "fmla vV33A.4s, vU55A.4s, vW33A.4s\n"
-        "ldr qU11A, [%x[uptr0]], #0x10\n"
-        "fmla vV32A.4s, vU54A.4s, vW33A.4s\n"
-        "fmla vV33A.4s, vU54A.4s, vW32A.4s\n"
-        "ldr qU21A, [uptr1], #0x10\n"
-        "fmla vV31A.4s, vU53A.4s, vW33A.4s\n"
-        "ldr qW33B, [wptr2, w_col_stride2]\n"
-        "fmla vV32A.4s, vU53A.4s, vW32A.4s\n"
-        "fmla vV33A.4s, vU53A.4s, vW31A.4s\n"
-        "str qV33A, [vptr2, v_col_stride2]\n"
-        "fmla vV11A.4s, vU12A.4s, vW12A.4s\n"
-        "ldr qU31A, [uptr2], #0x10\n"
-        "fmla vV12A.4s, vU12A.4s, vW11A.4s\n"
-        "ldr qU41A, [uptr3], #0x10\n"
-        "fmla vV11A.4s, vU22A.4s, vW22A.4s\n"
-        "ldr qU51A, [uptr4], #0x10\n"
-        "fmla vV12A.4s, vU22A.4s, vW21A.4s\n"
-        "ldr qW12B, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV21A.4s, vU22A.4s, vW12A.4s\n"
-        "ldr qU15B, [%x[uptr0], u_col_stride4]\n"
-        "fmla vV22A.4s, vU22A.4s, vW11A.4s\n"
-        "ldr qW22B, [wptr1, w_col_stride1]\n"
-        "fmla vV11A.4s, vU32A.4s, vW32A.4s\n"
-        "ldr qU14B, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV12A.4s, vU32A.4s, vW31A.4s\n"
-        "str qV12A, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV21A.4s, vU32A.4s, vW22A.4s\n"
-        "ldr qW32B, [wptr2, w_col_stride1]\n"
-        "fmla vV22A.4s, vU32A.4s, vW21A.4s\n"
-        "ldr qU13B, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV31A.4s, vU32A.4s, vW12A.4s\n"
-        "ldr qU25B, [uptr1, u_col_stride4]\n"
-        "fmla vV32A.4s, vU32A.4s, vW11A.4s\n"
-        "ldr qU24B, [uptr1, u_col_stride3]\n"
-        "fmla vV21A.4s, vU42A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU42A.4s, vW31A.4s\n"
-        "str qV22A, [vptr1, v_col_stride1]\n"
-        "fmla vV31A.4s, vU42A.4s, vW22A.4s\n"
-        "fmla vV32A.4s, vU42A.4s, vW21A.4s\n"
-        "fmla vV31A.4s, vU52A.4s, vW32A.4s\n"
-        "fmla vV32A.4s, vU52A.4s, vW31A.4s\n"
-        "str qV32A, [vptr2, v_col_stride1]\n"
-        "fmla vV11A.4s, vU11A.4s, vW11A.4s\n"
-        "ldr qW11B, [%x[wptr0]], #0x10\n"
-        "fmla vV11A.4s, vU21A.4s, vW21A.4s\n"
-        "ldr qU23B, [uptr1, u_col_stride2]\n"
-        "fmla vV21A.4s, vU21A.4s, vW11A.4s\n"
-        "ldr qW21B, [wptr1], #0x10\n"
-        "fmla vV11A.4s, vU31A.4s, vW31A.4s\n"
-        "str qV11A, [%x[vptr0]], #0x10\n"
-        "fmla vV21A.4s, vU31A.4s, vW21A.4s\n"
-        "ldr qW31B, [wptr2], #0x10\n"
-        "fmla vV31A.4s, vU31A.4s, vW11A.4s\n"
-        "ldr qU34B, [uptr2, u_col_stride3]\n"
-        "fmla vV21A.4s, vU41A.4s, vW31A.4s\n"
-        "str qV21A, [vptr1], #0x10\n"
-        "fmla vV31A.4s, vU41A.4s, vW21A.4s\n"
-        "ldr qU35B, [uptr2, u_col_stride4]\n"
-        "fmla vV31A.4s, vU51A.4s, vW31A.4s\n"
-        "str qV31A, [vptr2], #0x10\n"
-
-        // B Part
-        "fmul vV13B.4s, vU15B.4s, vW13B.4s\n"
-        "ldr qU33B, [uptr2, u_col_stride2]\n"
-        "fmul vV12B.4s, vU14B.4s, vW13B.4s\n"
-        "fmla vV13B.4s, vU14B.4s, vW12B.4s\n"
-        "ldr qU45B, [uptr3, u_col_stride4]\n"
-        "fmul vV11B.4s, vU13B.4s, vW13B.4s\n"
-        "fmla vV12B.4s, vU13B.4s, vW12B.4s\n"
-        "fmla vV13B.4s, vU13B.4s, vW11B.4s\n"
-        "ldr qU44B, [uptr3, u_col_stride3]\n"
-        "fmla vV13B.4s, vU25B.4s, vW23B.4s\n"
-        "fmul vV23B.4s, vU25B.4s, vW13B.4s\n"
-        "ldr qU43B, [uptr3, u_col_stride2]\n"
-        "fmla vV12B.4s, vU24B.4s, vW23B.4s\n"
-        "fmla vV13B.4s, vU24B.4s, vW22B.4s\n"
-        "fmul vV22B.4s, vU24B.4s, vW13B.4s\n"
-        "fmla vV23B.4s, vU24B.4s, vW12B.4s\n"
-        "ldr qU55B, [uptr4, u_col_stride4]\n"
-        "fmla vV11B.4s, vU23B.4s, vW23B.4s\n"
-        "fmla vV12B.4s, vU23B.4s, vW22B.4s\n"
-        "fmla vV13B.4s, vU23B.4s, vW21B.4s\n"
-        "fmul vV21B.4s, vU23B.4s, vW13B.4s\n"
-        "fmla vV22B.4s, vU23B.4s, vW12B.4s\n"
-        "fmla vV23B.4s, vU23B.4s, vW11B.4s\n"
-        "ldr qU54B, [uptr4, u_col_stride3]\n"
-        "fmla vV13B.4s, vU35B.4s, vW33B.4s\n"
-        "fmla vV23B.4s, vU35B.4s, vW23B.4s\n"
-        "fmul vV33B.4s, vU35B.4s, vW13B.4s\n"
-        "ldr qU53B, [uptr4, u_col_stride2]\n"
-        "fmla vV12B.4s, vU34B.4s, vW33B.4s\n"
-        "fmla vV13B.4s, vU34B.4s, vW32B.4s\n"
-        "fmla vV22B.4s, vU34B.4s, vW23B.4s\n"
-        "fmla vV23B.4s, vU34B.4s, vW22B.4s\n"
-        "fmul vV32B.4s, vU34B.4s, vW13B.4s\n"
-        "fmla vV33B.4s, vU34B.4s, vW12B.4s\n"
-        "ldr qU12B, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV11B.4s, vU33B.4s, vW33B.4s\n"
-        "fmla vV12B.4s, vU33B.4s, vW32B.4s\n"
-        "fmla vV13B.4s, vU33B.4s, vW31B.4s\n"
-        "str qV13B, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21B.4s, vU33B.4s, vW23B.4s\n"
-        "fmla vV22B.4s, vU33B.4s, vW22B.4s\n"
-        "fmla vV23B.4s, vU33B.4s, vW21B.4s\n"
-        "fmul vV31B.4s, vU33B.4s, vW13B.4s\n"
-        "fmla vV32B.4s, vU33B.4s, vW12B.4s\n"
-        "fmla vV33B.4s, vU33B.4s, vW11B.4s\n"
-        "ldr qU22B, [uptr1, u_col_stride1]\n"
-        "fmla vV23B.4s, vU45B.4s, vW33B.4s\n"
-        "fmla vV33B.4s, vU45B.4s, vW23B.4s\n"
-        "ldr qU32B, [uptr2, u_col_stride1]\n"
-        "fmla vV22B.4s, vU44B.4s, vW33B.4s\n"
-        "fmla vV23B.4s, vU44B.4s, vW32B.4s\n"
-        "fmla vV32B.4s, vU44B.4s, vW23B.4s\n"
-        "fmla vV33B.4s, vU44B.4s, vW22B.4s\n"
-        "ldr qU42B, [uptr3, u_col_stride1]\n"
-        "fmla vV21B.4s, vU43B.4s, vW33B.4s\n"
-        "fmla vV22B.4s, vU43B.4s, vW32B.4s\n"
-        "fmla vV23B.4s, vU43B.4s, vW31B.4s\n"
-        "str qV23B, [vptr1, v_col_stride2]\n"
-        "fmla vV31B.4s, vU43B.4s, vW23B.4s\n"
-        "fmla vV32B.4s, vU43B.4s, vW22B.4s\n"
-        "fmla vV33B.4s, vU43B.4s, vW21B.4s\n"
-        "ldr qU52B, [uptr4, u_col_stride1]\n"
-        "fmla vV33B.4s, vU55B.4s, vW33B.4s\n"
-        "ldr qU11B, [%x[uptr0]], #0x10\n"
-        "fmla vV32B.4s, vU54B.4s, vW33B.4s\n"
-        "fmla vV33B.4s, vU54B.4s, vW32B.4s\n"
-        "ldr qU21B, [uptr1], #0x10\n"
-        "fmla vV31B.4s, vU53B.4s, vW33B.4s\n"
-        "fmla vV32B.4s, vU53B.4s, vW32B.4s\n"
-        "fmla vV33B.4s, vU53B.4s, vW31B.4s\n"
-        "str qV33B, [vptr2, v_col_stride2]\n"
-        "fmla vV11B.4s, vU12B.4s, vW12B.4s\n"
-        "ldr qU31B, [uptr2], #0x10\n"
-        "fmla vV12B.4s, vU12B.4s, vW11B.4s\n"
-        "ldr qU41B, [uptr3], #0x10\n"
-        "fmla vV11B.4s, vU22B.4s, vW22B.4s\n"
-        "ldr qU51B, [uptr4], #0x10\n"
-        "fmla vV12B.4s, vU22B.4s, vW21B.4s\n"
-        "fmla vV21B.4s, vU22B.4s, vW12B.4s\n"
-        "fmla vV22B.4s, vU22B.4s, vW11B.4s\n"
-        "fmla vV11B.4s, vU32B.4s, vW32B.4s\n"
-        "fmla vV12B.4s, vU32B.4s, vW31B.4s\n"
-        "str qV12B, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV21B.4s, vU32B.4s, vW22B.4s\n"
-        "fmla vV22B.4s, vU32B.4s, vW21B.4s\n"
-        "fmla vV31B.4s, vU32B.4s, vW12B.4s\n"
-        "fmla vV32B.4s, vU32B.4s, vW11B.4s\n"
-        "fmla vV21B.4s, vU42B.4s, vW32B.4s\n"
-        "fmla vV22B.4s, vU42B.4s, vW31B.4s\n"
-        "str qV22B, [vptr1, v_col_stride1]\n"
-        "fmla vV31B.4s, vU42B.4s, vW22B.4s\n"
-        "fmla vV32B.4s, vU42B.4s, vW21B.4s\n"
-        "fmla vV31B.4s, vU52B.4s, vW32B.4s\n"
-        "subs %x[n_iters], %x[n_iters], #1\n"
-        "fmla vV32B.4s, vU52B.4s, vW31B.4s\n"
-        "str qV32B, [vptr2, v_col_stride1]\n"
-        "fmla vV11B.4s, vU11B.4s, vW11B.4s\n"
-        "fmla vV11B.4s, vU21B.4s, vW21B.4s\n"
-        "fmla vV21B.4s, vU21B.4s, vW11B.4s\n"
-        "fmla vV11B.4s, vU31B.4s, vW31B.4s\n"
-        "str qV11B, [%x[vptr0]], #0x10\n"
-        "fmla vV21B.4s, vU31B.4s, vW21B.4s\n"
-        "fmla vV31B.4s, vU31B.4s, vW11B.4s\n"
-        "fmla vV21B.4s, vU41B.4s, vW31B.4s\n"
-        "str qV21B, [vptr1], #0x10\n"
-        "fmla vV31B.4s, vU41B.4s, vW21B.4s\n"
-        "fmla vV31B.4s, vU51B.4s, vW31B.4s\n"
-        "str qV31B, [vptr2], #0x10\n"
-
-        "b 4f\n"  // Branch to end of method
-
-        "3:"  // Odd tail, finish off A
-        "fmla vV13A.4s, vU14A.4s, vW12A.4s\n"
-        "ldr qU45A, [uptr3, u_col_stride4]\n"
-        "fmul vV11A.4s, vU13A.4s, vW13A.4s\n"
-        "fmla vV12A.4s, vU13A.4s, vW12A.4s\n"
-        "fmla vV13A.4s, vU13A.4s, vW11A.4s\n"
-        "ldr qU44A, [uptr3, u_col_stride3]\n"
-        "fmla vV13A.4s, vU25A.4s, vW23A.4s\n"
-        "fmul vV23A.4s, vU25A.4s, vW13A.4s\n"
-        "ldr qU43A, [uptr3, u_col_stride2]\n"
-        "fmla vV12A.4s, vU24A.4s, vW23A.4s\n"
-        "fmla vV13A.4s, vU24A.4s, vW22A.4s\n"
-        "fmul vV22A.4s, vU24A.4s, vW13A.4s\n"
-        "fmla vV23A.4s, vU24A.4s, vW12A.4s\n"
-        "ldr qU55A, [uptr4, u_col_stride4]\n"
-        "fmla vV11A.4s, vU23A.4s, vW23A.4s\n"
-        "fmla vV12A.4s, vU23A.4s, vW22A.4s\n"
-        "fmla vV13A.4s, vU23A.4s, vW21A.4s\n"
-        "fmul vV21A.4s, vU23A.4s, vW13A.4s\n"
-        "fmla vV22A.4s, vU23A.4s, vW12A.4s\n"
-        "fmla vV23A.4s, vU23A.4s, vW11A.4s\n"
-        "ldr qU54A, [uptr4, u_col_stride3]\n"
-        "fmla vV13A.4s, vU35A.4s, vW33A.4s\n"
-        "fmla vV23A.4s, vU35A.4s, vW23A.4s\n"
-        "fmul vV33A.4s, vU35A.4s, vW13A.4s\n"
-        "ldr qU53A, [uptr4, u_col_stride2]\n"
-        "fmla vV12A.4s, vU34A.4s, vW33A.4s\n"
-        "fmla vV13A.4s, vU34A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU34A.4s, vW23A.4s\n"
-        "fmla vV23A.4s, vU34A.4s, vW22A.4s\n"
-        "fmul vV32A.4s, vU34A.4s, vW13A.4s\n"
-        "fmla vV33A.4s, vU34A.4s, vW12A.4s\n"
-        "ldr qU12A, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV11A.4s, vU33A.4s, vW33A.4s\n"
-        "fmla vV12A.4s, vU33A.4s, vW32A.4s\n"
-        "fmla vV13A.4s, vU33A.4s, vW31A.4s\n"
-        "str qV13A, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21A.4s, vU33A.4s, vW23A.4s\n"
-        "fmla vV22A.4s, vU33A.4s, vW22A.4s\n"
-        "fmla vV23A.4s, vU33A.4s, vW21A.4s\n"
-        "fmul vV31A.4s, vU33A.4s, vW13A.4s\n"
-        "fmla vV32A.4s, vU33A.4s, vW12A.4s\n"
-        "fmla vV33A.4s, vU33A.4s, vW11A.4s\n"
-        "ldr qU22A, [uptr1, u_col_stride1]\n"
-        "fmla vV23A.4s, vU45A.4s, vW33A.4s\n"
-        "fmla vV33A.4s, vU45A.4s, vW23A.4s\n"
-        "ldr qU32A, [uptr2, u_col_stride1]\n"
-        "fmla vV22A.4s, vU44A.4s, vW33A.4s\n"
-        "fmla vV23A.4s, vU44A.4s, vW32A.4s\n"
-        "fmla vV32A.4s, vU44A.4s, vW23A.4s\n"
-        "fmla vV33A.4s, vU44A.4s, vW22A.4s\n"
-        "ldr qU42A, [uptr3, u_col_stride1]\n"
-        "fmla vV21A.4s, vU43A.4s, vW33A.4s\n"
-        "fmla vV22A.4s, vU43A.4s, vW32A.4s\n"
-        "fmla vV23A.4s, vU43A.4s, vW31A.4s\n"
-        "str qV23A, [vptr1, v_col_stride2]\n"
-        "fmla vV31A.4s, vU43A.4s, vW23A.4s\n"
-        "fmla vV32A.4s, vU43A.4s, vW22A.4s\n"
-        "fmla vV33A.4s, vU43A.4s, vW21A.4s\n"
-        "ldr qU52A, [uptr4, u_col_stride1]\n"
-        "fmla vV33A.4s, vU55A.4s, vW33A.4s\n"
-        "ldr qU11A, [%x[uptr0]], #0x10\n"
-        "fmla vV32A.4s, vU54A.4s, vW33A.4s\n"
-        "fmla vV33A.4s, vU54A.4s, vW32A.4s\n"
-        "ldr qU21A, [uptr1], #0x10\n"
-        "fmla vV31A.4s, vU53A.4s, vW33A.4s\n"
-        "fmla vV32A.4s, vU53A.4s, vW32A.4s\n"
-        "fmla vV33A.4s, vU53A.4s, vW31A.4s\n"
-        "str qV33A, [vptr2, v_col_stride2]\n"
-        "fmla vV11A.4s, vU12A.4s, vW12A.4s\n"
-        "ldr qU31A, [uptr2], #0x10\n"
-        "fmla vV12A.4s, vU12A.4s, vW11A.4s\n"
-        "ldr qU41A, [uptr3], #0x10\n"
-        "fmla vV11A.4s, vU22A.4s, vW22A.4s\n"
-        "ldr qU51A, [uptr4], #0x10\n"
-        "fmla vV12A.4s, vU22A.4s, vW21A.4s\n"
-        "fmla vV21A.4s, vU22A.4s, vW12A.4s\n"
-        "fmla vV22A.4s, vU22A.4s, vW11A.4s\n"
-        "fmla vV11A.4s, vU32A.4s, vW32A.4s\n"
-        "fmla vV12A.4s, vU32A.4s, vW31A.4s\n"
-        "str qV12A, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV21A.4s, vU32A.4s, vW22A.4s\n"
-        "fmla vV22A.4s, vU32A.4s, vW21A.4s\n"
-        "fmla vV31A.4s, vU32A.4s, vW12A.4s\n"
-        "fmla vV32A.4s, vU32A.4s, vW11A.4s\n"
-        "fmla vV21A.4s, vU42A.4s, vW32A.4s\n"
-        "fmla vV22A.4s, vU42A.4s, vW31A.4s\n"
-        "str qV22A, [vptr1, v_col_stride1]\n"
-        "fmla vV31A.4s, vU42A.4s, vW22A.4s\n"
-        "fmla vV32A.4s, vU42A.4s, vW21A.4s\n"
-        "fmla vV31A.4s, vU52A.4s, vW32A.4s\n"
-        "fmla vV32A.4s, vU52A.4s, vW31A.4s\n"
-        "str qV32A, [vptr2, v_col_stride1]\n"
-        "fmla vV11A.4s, vU11A.4s, vW11A.4s\n"
-        "fmla vV11A.4s, vU21A.4s, vW21A.4s\n"
-        "fmla vV21A.4s, vU21A.4s, vW11A.4s\n"
-        "fmla vV11A.4s, vU31A.4s, vW31A.4s\n"
-        "str qV11A, [%x[vptr0]], #0x10\n"
-        "fmla vV21A.4s, vU31A.4s, vW21A.4s\n"
-        "fmla vV31A.4s, vU31A.4s, vW11A.4s\n"
-        "fmla vV21A.4s, vU41A.4s, vW31A.4s\n"
-        "str qV21A, [vptr1], #0x10\n"
-        "fmla vV31A.4s, vU41A.4s, vW21A.4s\n"
-        "fmla vV31A.4s, vU51A.4s, vW31A.4s\n"
-        "str qV31A, [vptr2], #0x10\n"
-
-        "4:"  // End of method
-        ".unreq uptr1\n" ".unreq uptr2\n" ".unreq uptr3\n" ".unreq uptr4\n"
-        ".unreq u_col_stride1\n" ".unreq u_col_stride2\n"
-        ".unreq u_col_stride3\n" ".unreq u_col_stride4\n"
-        ".unreq wptr1\n" ".unreq wptr2\n"
-        ".unreq w_col_stride1\n" ".unreq w_col_stride2\n"
-        ".unreq vptr1\n" ".unreq vptr2\n"
-        ".unreq v_col_stride1\n" ".unreq v_col_stride2\n"
-
-        ".unreq qU22B\n" ".unreq qW13B\n" ".unreq qW13A\n" ".unreq qU51B\n"
-        ".unreq qU54B\n" ".unreq qU45A\n" ".unreq qU15A\n" ".unreq qU41B\n"
-        ".unreq qU24B\n" ".unreq qU21A\n"
-        ".unreq qV11B\n" ".unreq qU51A\n" ".unreq qU35A\n" ".unreq qU12A\n"
-        ".unreq qU42B\n" ".unreq qU44B\n" ".unreq qU13B\n" ".unreq qW33A\n"
-        ".unreq qV31B\n" ".unreq qV23A\n" ".unreq qU31A\n" ".unreq qU35B\n" ".unreq qU13A\n"
-        ".unreq qV23B\n" ".unreq qU11A\n" ".unreq qU25A\n" ".unreq qU43A\n" ".unreq qU52B\n"
-        ".unreq qU24A\n" ".unreq qU23B\n" ".unreq qV21A\n" ".unreq qV32B\n"
-        ".unreq qV33B\n" ".unreq qW11A\n" ".unreq qU31B\n"
-        ".unreq qW12B\n" ".unreq qU33A\n" ".unreq qU14A\n" ".unreq qU22A\n"
-        ".unreq qU25B\n" ".unreq qU53B\n" ".unreq qU42A\n" ".unreq qU44A\n"
-        ".unreq qU43B\n" ".unreq qW31A\n" ".unreq qU11B\n"
-        ".unreq qW11B\n" ".unreq qW32A\n"
-        ".unreq qU12B\n" ".unreq qU34B\n" ".unreq qW21A\n"
-        ".unreq qU14B\n" ".unreq qV21B\n" ".unreq qW22A\n"
-        ".unreq qW23B\n" ".unreq qW23A\n" ".unreq qU21B\n"
-        ".unreq qU32B\n" ".unreq qU34A\n" ".unreq qU45B\n" ".unreq qV31A\n"
-        ".unreq qW12A\n" ".unreq qU33B\n" ".unreq qU15B\n"
-        ".unreq qW33B\n" ".unreq qU54A\n" ".unreq qU23A\n"
-        ".unreq qW32B\n" ".unreq qV33A\n" ".unreq qW31B\n" ".unreq qV12A\n"
-        ".unreq qV12B\n" ".unreq qU41A\n" ".unreq qU53A\n"
-        ".unreq qV13A\n" ".unreq qU32A\n" ".unreq qW22B\n"
-        ".unreq qV22B\n" ".unreq qU52A\n" ".unreq qV13B\n" ".unreq qV32A\n"
-        ".unreq qU55A\n" ".unreq qU55B\n" ".unreq qV22A\n" ".unreq qW21B\n"
-        ".unreq qV11A\n"
-        ".unreq vU22B\n" ".unreq vW13B\n" ".unreq vW13A\n" ".unreq vU51B\n"
-        ".unreq vU54B\n" ".unreq vU45A\n" ".unreq vU15A\n" ".unreq vU41B\n"
-        ".unreq vU24B\n" ".unreq vU21A\n"
-        ".unreq vV11B\n" ".unreq vU51A\n" ".unreq vU35A\n" ".unreq vU12A\n"
-        ".unreq vU42B\n" ".unreq vU44B\n" ".unreq vU13B\n" ".unreq vW33A\n"
-        ".unreq vV31B\n" ".unreq vV23A\n" ".unreq vU31A\n" ".unreq vU35B\n" ".unreq vU13A\n"
-        ".unreq vV23B\n" ".unreq vU11A\n" ".unreq vU25A\n" ".unreq vU43A\n" ".unreq vU52B\n"
-        ".unreq vU24A\n" ".unreq vU23B\n" ".unreq vV21A\n" ".unreq vV32B\n"
-        ".unreq vV33B\n" ".unreq vW11A\n" ".unreq vU31B\n"
-        ".unreq vW12B\n" ".unreq vU33A\n" ".unreq vU14A\n" ".unreq vU22A\n"
-        ".unreq vU25B\n" ".unreq vU53B\n" ".unreq vU42A\n" ".unreq vU44A\n"
-        ".unreq vU43B\n" ".unreq vW31A\n" ".unreq vU11B\n"
-        ".unreq vW11B\n" ".unreq vW32A\n"
-        ".unreq vU12B\n" ".unreq vU34B\n" ".unreq vW21A\n"
-        ".unreq vU14B\n" ".unreq vV21B\n" ".unreq vW22A\n"
-        ".unreq vW23B\n" ".unreq vW23A\n" ".unreq vU21B\n"
-        ".unreq vU32B\n" ".unreq vU34A\n" ".unreq vU45B\n" ".unreq vV31A\n"
-        ".unreq vW12A\n" ".unreq vU33B\n" ".unreq vU15B\n"
-        ".unreq vW33B\n" ".unreq vU54A\n" ".unreq vU23A\n"
-        ".unreq vW32B\n" ".unreq vV33A\n" ".unreq vW31B\n" ".unreq vV12A\n"
-        ".unreq vV12B\n" ".unreq vU41A\n" ".unreq vU53A\n"
-        ".unreq vV13A\n" ".unreq vU32A\n" ".unreq vW22B\n"
-        ".unreq vV22B\n" ".unreq vU52A\n" ".unreq vV13B\n" ".unreq vV32A\n"
-        ".unreq vU55A\n" ".unreq vU55B\n" ".unreq vV22A\n" ".unreq vW21B\n"
-        ".unreq vV11A\n"
-        : [uptr0] "+r" (uptr0), [wptr0] "+r" (wptr0), [vptr0] "+r" (vptr0),
-          [n_iters] "+r" (n_iters)
-        : [u_row_stride] "r" (in_row_stride * sizeof(float)),
-          [u_col_stride] "r" (in_col_stride * sizeof(float)),
-          [w_row_stride] "r" (weight_row_stride * sizeof(float)),
-          [w_col_stride] "r" (weight_col_stride * sizeof(float)),
-          [v_row_stride] "r" (out_row_stride * sizeof(float)),
-          [v_col_stride] "r" (out_col_stride * sizeof(float)),
-          [odd_tail] "r" (odd_tail)
-        : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
-          "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
-          "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x0",
-          "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
-          "x12", "cc", "memory"
-    );
-  }
-  if (channels_remaining)
-  {
-    // Fall back on the unoptimised version to clean up the tail
-    ConvImpl::process_tile<false>(
-        channels_remaining,
-        wptr0, weight_row_stride, weight_col_stride,
-        uptr0, in_row_stride, in_col_stride,
-        vptr0, out_row_stride, out_col_stride,
-        0, 0, 0, 0, 0, 0
-    );
-  }
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU6>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
+  __asm __volatile(
+    "add x17, %[inptr0], %[input_row_stride]\n"
+    "add x18, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x25, %[outptr0], %[output_row_stride]\n"
+    "add x14, x17, %[input_row_stride]\n"
+    "add x22, x18, #64\n"
+    "add x15, x18, %[input_col_stride1]\n"
+    "add x21, x14, %[input_row_stride]\n"
+    "add x16, x15, #64\n"
+    "add x24, x15, %[input_col_stride1]\n"
+    "add x26, x21, %[input_row_stride]\n"
+    "add x23, x24, #64\n"
+    "add x13, x25, %[output_row_stride]\n"
+    "add x27, %[output_col_stride1], %[output_col_stride1]\n"
+    "and x19, %[n_channels], #3\n"
+    "lsr x20, %[n_channels], #2\n"
+    "cbz x20, 4f\n"
+    "1:\n"
+    "ldr q19, [%[wbptr]]\n"
+    "subs x20, x20, #1\n"
+    "mov v8.16b, v19.16b\n"
+    "ldr q17, [%[wbptr], #16]\n"
+    "mov v5.16b, v19.16b\n"
+    "ldr q16, [%[wbptr], #32]\n"
+    "mov v7.16b, v19.16b\n"
+    "ldr q15, [%[wbptr], #48]\n"
+    "mov v2.16b, v19.16b\n"
+    "ldr q14, [%[wbptr], #64]\n"
+    "mov v4.16b, v19.16b\n"
+    "ldr q13, [%[wbptr], #80]\n"
+    "mov v6.16b, v19.16b\n"
+    "ldr q12, [%[wbptr], #96]\n"
+    "mov v1.16b, v19.16b\n"
+    "ldr q11, [%[wbptr], #112]\n"
+    "mov v3.16b, v19.16b\n"
+    "ldr q10, [%[wbptr], #128]\n"
+    "mov v0.16b, v19.16b\n"
+    "ldr q9, [%[wbptr], #144]\n"
+    "ldr q25, [%[inptr0]]\n"
+    "ldr q27, [x17]\n"
+    "fmla v8.4s, v25.4s, v17.4s\n"
+    "ldr q26, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q20, [x14]\n"
+    "ldr q22, [x17, %[input_col_stride1]]\n"
+    "ldr q28, [%[inptr0], x18]\n"
+    "ldr q23, [x21]\n"
+    "fmla v8.4s, v27.4s, v14.4s\n"
+    "ldr q18, [x14, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x17, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "prfm pldl1keep, [x14, #64]\n"
+    "prfm pldl1keep, [x17, x28]\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "prfm pldl1keep, [x14, x28]\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v5.4s, v27.4s, v17.4s\n"
+    "ldr q27, [x17, x18]\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "ldr q30, [%[inptr0], x15]\n"
+    "fmla v7.4s, v26.4s, v17.4s\n"
+    "ldr q31, [x26]\n"
+    "fmla v5.4s, v20.4s, v14.4s\n"
+    "ldr q24, [x21, %[input_col_stride1]]\n"
+    "fmla v8.4s, v20.4s, v11.4s\n"
+    "prfm pldl1keep, [x17, x22]\n"
+    "fmla v2.4s, v20.4s, v17.4s\n"
+    "ldr q29, [x14, x18]\n"
+    "fmla v5.4s, v22.4s, v16.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "fmla v7.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x21, x28]\n"
+    "fmla v4.4s, v22.4s, v17.4s\n"
+    "ldr q21, [x17, x15]\n"
+    "fmla v8.4s, v28.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x22]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x16]\n"
+    "fmla v6.4s, v28.4s, v17.4s\n"
+    "ldr q19, [%[inptr0], x24]\n"
+    "fmla v5.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "ldr q28, [x26, %[input_col_stride1]]\n"
+    "fmla v8.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x26, x28]\n"
+    "fmla v5.4s, v18.4s, v13.4s\n"
+    "prfm pldl1keep, [x21, x22]\n"
+    "fmla v7.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x14, x16]\n"
+    "fmla v2.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x23]\n"
+    "fmla v4.4s, v18.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x22]\n"
+    "fmla v1.4s, v18.4s, v17.4s\n"
+    "ldr q25, [x21, x18]\n"
+    "fmla v8.4s, v27.4s, v12.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v5.4s, v27.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x23]\n"
+    "fmla v7.4s, v27.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, x16]\n"
+    "fmla v4.4s, v27.4s, v16.4s\n"
+    "prfm pldl1keep, [x21, x23]\n"
+    "fmla v6.4s, v27.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x23]\n"
+    "fmla v3.4s, v27.4s, v17.4s\n"
+    "ldr q27, [x14, x15]\n"
+    "fmla v7.4s, v30.4s, v15.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v6.4s, v30.4s, v16.4s\n"
+    "ldr q26, [x17, x24]\n"
+    "fmla v2.4s, v31.4s, v11.4s\n"
+    "ldr q20, [x26, x18]\n"
+    "fmla v5.4s, v24.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v4.4s, v24.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v2.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v1.4s, v24.4s, v14.4s\n"
+    "ldr q18, [x21, x15]\n"
+    "fmla v8.4s, v29.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "fmla v5.4s, v29.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "fmla v7.4s, v29.4s, v10.4s\n"
+    "add x17, x17, #16\n"
+    "fmla v2.4s, v29.4s, v15.4s\n"
+    "prfm pldl1keep, [x17, #64]\n"
+    "fmla v4.4s, v29.4s, v13.4s\n"
+    "prfm pldl1keep, [x17, x28]\n"
+    "fmla v6.4s, v29.4s, v11.4s\n"
+    "subs x20, x20, #1\n"
+    "fmla v1.4s, v29.4s, v16.4s\n"
+    "fmla v3.4s, v29.4s, v14.4s\n"
+    "fmla v0.4s, v29.4s, v17.4s\n"
+    "ldr q22, [x14, x24]\n"
+    "fmla v7.4s, v21.4s, v12.4s\n"
+    "ldr q23, [x26, x15]\n"
+    "fmla v4.4s, v21.4s, v15.4s\n"
+    "add x14, x14, #16\n"
+    "fmla v6.4s, v21.4s, v13.4s\n"
+    "prfm pldl1keep, [x14, #64]\n"
+    "fmla v3.4s, v21.4s, v16.4s\n"
+    "ldr q24, [x21, x24]\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "prfm pldl1keep, [x14, x28]\n"
+    "fmla v6.4s, v19.4s, v15.4s\n"
+    "ldr q21, [x26, x24]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "ldr q19, [%[wbptr]]\n"
+    "fmla v5.4s, v25.4s, v9.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v2.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v1.4s, v25.4s, v13.4s\n"
+    "fmla v3.4s, v25.4s, v11.4s\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "ldr q17, [%[wbptr], #16]\n"
+    "fmla v7.4s, v27.4s, v9.4s\n"
+    "ldr q25, [%[inptr0]]\n"
+    "fmla v4.4s, v27.4s, v12.4s\n"
+    "fmla v6.4s, v27.4s, v10.4s\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "fmla v3.4s, v27.4s, v13.4s\n"
+    "fmla v0.4s, v27.4s, v16.4s\n"
+    "ldr q14, [%[wbptr], #64]\n"
+    "fmla v6.4s, v26.4s, v12.4s\n"
+    "ldr q27, [x17]\n"
+    "fmla v3.4s, v26.4s, v15.4s\n"
+    "ldr q26, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v20.4s, v9.4s\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "fmla v0.4s, v20.4s, v11.4s\n"
+    "ldr q16, [%[wbptr], #32]\n"
+    "fmla v4.4s, v18.4s, v9.4s\n"
+    "ldr q20, [x14]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "fmla v0.4s, v18.4s, v13.4s\n"
+    "ldr q11, [%[wbptr], #112]\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "movi v30.16b, #0\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "fmla v0.4s, v22.4s, v15.4s\n"
+    "ldr q13, [%[wbptr], #80]\n"
+    "fmov v29.4s, #6.0\n"
+    "fmax v8.4s, v8.4s, v30.4s\n"
+    "fmla v3.4s, v24.4s, v9.4s\n"
+    "fmax v7.4s, v7.4s, v30.4s\n"
+    "fmla v0.4s, v23.4s, v10.4s\n"
+    "ldr q15, [%[wbptr], #48]\n"
+    "fmin v8.4s, v8.4s, v29.4s\n"
+    "ldr q22, [x17, %[input_col_stride1]]\n"
+    "fmin v7.4s, v7.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v30.4s\n"
+    "str q8, [%[outptr0]]\n"
+    "fmla v0.4s, v24.4s, v12.4s\n"
+    "str q7, [%[outptr0], %[output_col_stride1]]\n"
+    "fmin v6.4s, v6.4s, v29.4s\n"
+    "fmax v5.4s, v5.4s, v30.4s\n"
+    "ldr q10, [%[wbptr], #128]\n"
+    "str q6, [%[outptr0], x27]\n"
+    "fmla v0.4s, v21.4s, v9.4s\n"
+    "fmin v5.4s, v5.4s, v29.4s\n"
+    "ldr q12, [%[wbptr], #96]\n"
+    "fmax v4.4s, v4.4s, v30.4s\n"
+    "ldr q28, [%[inptr0], x18]\n"
+    "str q5, [x25]\n"
+    "fmax v3.4s, v3.4s, v30.4s\n"
+    "fmin v4.4s, v4.4s, v29.4s\n"
+    "ldr q9, [%[wbptr], #144]\n"
+    "fmin v3.4s, v3.4s, v29.4s\n"
+    "ldr q23, [x21]\n"
+    "str q4, [x25, %[output_col_stride1]]\n"
+    "fmax v2.4s, v2.4s, v30.4s\n"
+    "str q3, [x25, x27]\n"
+    "fmax v1.4s, v1.4s, v30.4s\n"
+    "fmin v2.4s, v2.4s, v29.4s\n"
+    "ldr q18, [x14, %[input_col_stride1]]\n"
+    "fmin v1.4s, v1.4s, v29.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "str q2, [x13]\n"
+    "fmax v0.4s, v0.4s, v30.4s\n"
+    "str q1, [x13, %[output_col_stride1]]\n"
+    "mov v8.16b, v19.16b\n"
+    "fmin v0.4s, v0.4s, v29.4s\n"
+    "add x25, x25, #16\n"
+    "mov v5.16b, v19.16b\n"
+    "mov v7.16b, v19.16b\n"
+    "str q0, [x13, x27]\n"
+    "mov v2.16b, v19.16b\n"
+    "mov v4.16b, v19.16b\n"
+    "add x13, x13, #16\n"
+    "mov v6.16b, v19.16b\n"
+    "mov v1.16b, v19.16b\n"
+    "mov v3.16b, v19.16b\n"
+    "mov v0.16b, v19.16b\n"
+    "fmla v8.4s, v25.4s, v17.4s\n"
+    "fmla v8.4s, v27.4s, v14.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v5.4s, v27.4s, v17.4s\n"
+    "ldr q27, [x17, x18]\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "ldr q30, [%[inptr0], x15]\n"
+    "fmla v7.4s, v26.4s, v17.4s\n"
+    "ldr q31, [x26]\n"
+    "fmla v5.4s, v20.4s, v14.4s\n"
+    "ldr q24, [x21, %[input_col_stride1]]\n"
+    "fmla v8.4s, v20.4s, v11.4s\n"
+    "prfm pldl1keep, [x17, x22]\n"
+    "fmla v2.4s, v20.4s, v17.4s\n"
+    "ldr q29, [x14, x18]\n"
+    "fmla v5.4s, v22.4s, v16.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "fmla v7.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x21, x28]\n"
+    "fmla v4.4s, v22.4s, v17.4s\n"
+    "ldr q21, [x17, x15]\n"
+    "fmla v8.4s, v28.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x22]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x16]\n"
+    "fmla v6.4s, v28.4s, v17.4s\n"
+    "ldr q19, [%[inptr0], x24]\n"
+    "fmla v5.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "ldr q28, [x26, %[input_col_stride1]]\n"
+    "fmla v8.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x26, x28]\n"
+    "fmla v5.4s, v18.4s, v13.4s\n"
+    "prfm pldl1keep, [x21, x22]\n"
+    "fmla v7.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x14, x16]\n"
+    "fmla v2.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x23]\n"
+    "fmla v4.4s, v18.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x22]\n"
+    "fmla v1.4s, v18.4s, v17.4s\n"
+    "ldr q25, [x21, x18]\n"
+    "fmla v8.4s, v27.4s, v12.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v5.4s, v27.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x23]\n"
+    "fmla v7.4s, v27.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, x16]\n"
+    "fmla v4.4s, v27.4s, v16.4s\n"
+    "prfm pldl1keep, [x21, x23]\n"
+    "fmla v6.4s, v27.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x23]\n"
+    "fmla v3.4s, v27.4s, v17.4s\n"
+    "ldr q27, [x14, x15]\n"
+    "fmla v7.4s, v30.4s, v15.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v6.4s, v30.4s, v16.4s\n"
+    "ldr q26, [x17, x24]\n"
+    "fmla v2.4s, v31.4s, v11.4s\n"
+    "ldr q20, [x26, x18]\n"
+    "fmla v5.4s, v24.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v4.4s, v24.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v2.4s, v24.4s, v13.4s\n"
+    "add x17, x17, #16\n"
+    "fmla v1.4s, v24.4s, v14.4s\n"
+    "ldr q18, [x21, x15]\n"
+    "fmla v8.4s, v29.4s, v9.4s\n"
+    "fmla v5.4s, v29.4s, v12.4s\n"
+    "fmla v7.4s, v29.4s, v10.4s\n"
+    "fmla v2.4s, v29.4s, v15.4s\n"
+    "fmla v4.4s, v29.4s, v13.4s\n"
+    "fmla v6.4s, v29.4s, v11.4s\n"
+    "fmla v1.4s, v29.4s, v16.4s\n"
+    "fmla v3.4s, v29.4s, v14.4s\n"
+    "fmla v0.4s, v29.4s, v17.4s\n"
+    "ldr q22, [x14, x24]\n"
+    "fmla v7.4s, v21.4s, v12.4s\n"
+    "ldr q23, [x26, x15]\n"
+    "fmla v4.4s, v21.4s, v15.4s\n"
+    "add x14, x14, #16\n"
+    "fmla v6.4s, v21.4s, v13.4s\n"
+    "fmla v3.4s, v21.4s, v16.4s\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "ldr q24, [x21, x24]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "ldr q21, [x26, x24]\n"
+    "fmla v6.4s, v19.4s, v15.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v5.4s, v25.4s, v9.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v2.4s, v25.4s, v12.4s\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "fmla v1.4s, v25.4s, v13.4s\n"
+    "fmla v3.4s, v25.4s, v11.4s\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "fmla v7.4s, v27.4s, v9.4s\n"
+    "fmla v4.4s, v27.4s, v12.4s\n"
+    "fmla v6.4s, v27.4s, v10.4s\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "fmla v3.4s, v27.4s, v13.4s\n"
+    "fmla v0.4s, v27.4s, v16.4s\n"
+    "fmla v2.4s, v20.4s, v9.4s\n"
+    "fmla v6.4s, v26.4s, v12.4s\n"
+    "fmla v4.4s, v18.4s, v9.4s\n"
+    "fmla v3.4s, v26.4s, v15.4s\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "fmla v0.4s, v20.4s, v11.4s\n"
+    "movi v30.16b, #0\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "fmov v29.4s, #6.0\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "fmla v0.4s, v18.4s, v13.4s\n"
+    "fmax v8.4s, v8.4s, v30.4s\n"
+    "fmax v7.4s, v7.4s, v30.4s\n"
+    "fmax v6.4s, v6.4s, v30.4s\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "fmla v0.4s, v22.4s, v15.4s\n"
+    "fmin v8.4s, v8.4s, v29.4s\n"
+    "fmin v7.4s, v7.4s, v29.4s\n"
+    "fmin v6.4s, v6.4s, v29.4s\n"
+    "str q8, [%[outptr0]]\n"
+    "fmla v3.4s, v24.4s, v9.4s\n"
+    "str q7, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v23.4s, v10.4s\n"
+    "str q6, [%[outptr0], x27]\n"
+    "fmax v5.4s, v5.4s, v30.4s\n"
+    "fmax v4.4s, v4.4s, v30.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v0.4s, v24.4s, v12.4s\n"
+    "fmin v5.4s, v5.4s, v29.4s\n"
+    "fmin v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v30.4s\n"
+    "str q5, [x25]\n"
+    "fmax v2.4s, v2.4s, v30.4s\n"
+    "str q4, [x25, %[output_col_stride1]]\n"
+    "fmla v0.4s, v21.4s, v9.4s\n"
+    "fmin v3.4s, v3.4s, v29.4s\n"
+    "fmin v2.4s, v2.4s, v29.4s\n"
+    "fmax v1.4s, v1.4s, v30.4s\n"
+    "str q3, [x25, x27]\n"
+    "str q2, [x13]\n"
+    "fmin v1.4s, v1.4s, v29.4s\n"
+    "fmax v0.4s, v0.4s, v30.4s\n"
+    "add x25, x25, #16\n"
+    "str q1, [x13, %[output_col_stride1]]\n"
+    "fmin v0.4s, v0.4s, v29.4s\n"
+    "str q0, [x13, x27]\n"
+    "add x13, x13, #16\n"
+    "4:\n"
+    "cbz x19, 7f\n"
+    "ldr s19, [%[wbptr]]\n"
+    "mov v8.16b, v19.16b\n"
+    "ldr s17, [%[wbptr], #4]\n"
+    "mov v5.16b, v19.16b\n"
+    "ldr s16, [%[wbptr], #8]\n"
+    "mov v7.16b, v19.16b\n"
+    "ldr s15, [%[wbptr], #12]\n"
+    "mov v2.16b, v19.16b\n"
+    "ldr s14, [%[wbptr], #16]\n"
+    "mov v4.16b, v19.16b\n"
+    "ldr s13, [%[wbptr], #20]\n"
+    "mov v6.16b, v19.16b\n"
+    "ldr s12, [%[wbptr], #24]\n"
+    "mov v1.16b, v19.16b\n"
+    "ldr s11, [%[wbptr], #28]\n"
+    "mov v3.16b, v19.16b\n"
+    "ldr s10, [%[wbptr], #32]\n"
+    "mov v0.16b, v19.16b\n"
+    "ldr s9, [%[wbptr], #36]\n"
+    "ldr s25, [%[inptr0]]\n"
+    "subs x19, x19, #1\n"
+    "fmla v8.4s, v25.4s, v17.4s\n"
+    "ldr s27, [x17]\n"
+    "ldr s26, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr s20, [x14]\n"
+    "ldr s22, [x17, %[input_col_stride1]]\n"
+    "ldr s28, [%[inptr0], x18]\n"
+    "fmla v8.4s, v27.4s, v14.4s\n"
+    "ldr s23, [x21]\n"
+    "ldr s18, [x14, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x17, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "prfm pldl1keep, [x14, #64]\n"
+    "prfm pldl1keep, [x17, x28]\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "prfm pldl1keep, [x14, x28]\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v5.4s, v27.4s, v17.4s\n"
+    "ldr s27, [x17, x18]\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "ldr s30, [%[inptr0], x15]\n"
+    "fmla v7.4s, v26.4s, v17.4s\n"
+    "ldr s31, [x26]\n"
+    "fmla v5.4s, v20.4s, v14.4s\n"
+    "ldr s24, [x21, %[input_col_stride1]]\n"
+    "fmla v8.4s, v20.4s, v11.4s\n"
+    "prfm pldl1keep, [x17, x22]\n"
+    "fmla v2.4s, v20.4s, v17.4s\n"
+    "ldr s29, [x14, x18]\n"
+    "fmla v5.4s, v22.4s, v16.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "fmla v7.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x21, x28]\n"
+    "fmla v4.4s, v22.4s, v17.4s\n"
+    "ldr s21, [x17, x15]\n"
+    "fmla v8.4s, v28.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x22]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x16]\n"
+    "fmla v6.4s, v28.4s, v17.4s\n"
+    "ldr s19, [%[inptr0], x24]\n"
+    "fmla v5.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "ldr s28, [x26, %[input_col_stride1]]\n"
+    "fmla v8.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x26, x28]\n"
+    "fmla v5.4s, v18.4s, v13.4s\n"
+    "prfm pldl1keep, [x21, x22]\n"
+    "fmla v7.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x14, x16]\n"
+    "fmla v2.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x23]\n"
+    "fmla v4.4s, v18.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x22]\n"
+    "fmla v1.4s, v18.4s, v17.4s\n"
+    "ldr s25, [x21, x18]\n"
+    "fmla v8.4s, v27.4s, v12.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v5.4s, v27.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x23]\n"
+    "fmla v7.4s, v27.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, x16]\n"
+    "fmla v4.4s, v27.4s, v16.4s\n"
+    "prfm pldl1keep, [x21, x23]\n"
+    "fmla v6.4s, v27.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x23]\n"
+    "fmla v3.4s, v27.4s, v17.4s\n"
+    "ldr s27, [x14, x15]\n"
+    "fmla v7.4s, v30.4s, v15.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v6.4s, v30.4s, v16.4s\n"
+    "ldr s26, [x17, x24]\n"
+    "fmla v2.4s, v31.4s, v11.4s\n"
+    "ldr s20, [x26, x18]\n"
+    "fmla v5.4s, v24.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v4.4s, v24.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v2.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v1.4s, v24.4s, v14.4s\n"
+    "ldr s18, [x21, x15]\n"
+    "fmla v8.4s, v29.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "fmla v5.4s, v29.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "fmla v7.4s, v29.4s, v10.4s\n"
+    "add x17, x17, #4\n"
+    "fmla v2.4s, v29.4s, v15.4s\n"
+    "prfm pldl1keep, [x17, #64]\n"
+    "fmla v4.4s, v29.4s, v13.4s\n"
+    "prfm pldl1keep, [x17, x28]\n"
+    "fmla v6.4s, v29.4s, v11.4s\n"
+    "subs x19, x19, #1\n"
+    "fmla v1.4s, v29.4s, v16.4s\n"
+    "fmla v3.4s, v29.4s, v14.4s\n"
+    "fmla v0.4s, v29.4s, v17.4s\n"
+    "ldr s22, [x14, x24]\n"
+    "fmla v7.4s, v21.4s, v12.4s\n"
+    "ldr s23, [x26, x15]\n"
+    "fmla v4.4s, v21.4s, v15.4s\n"
+    "add x14, x14, #4\n"
+    "fmla v6.4s, v21.4s, v13.4s\n"
+    "prfm pldl1keep, [x14, #64]\n"
+    "fmla v3.4s, v21.4s, v16.4s\n"
+    "ldr s24, [x21, x24]\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "prfm pldl1keep, [x14, x28]\n"
+    "fmla v6.4s, v19.4s, v15.4s\n"
+    "ldr s21, [x26, x24]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "ldr s19, [%[wbptr]]\n"
+    "fmla v5.4s, v25.4s, v9.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v2.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x21, #64]\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v1.4s, v25.4s, v13.4s\n"
+    "fmla v3.4s, v25.4s, v11.4s\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "ldr s17, [%[wbptr], #4]\n"
+    "fmla v7.4s, v27.4s, v9.4s\n"
+    "ldr s25, [%[inptr0]]\n"
+    "fmla v4.4s, v27.4s, v12.4s\n"
+    "fmla v6.4s, v27.4s, v10.4s\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "fmla v3.4s, v27.4s, v13.4s\n"
+    "fmla v0.4s, v27.4s, v16.4s\n"
+    "ldr s14, [%[wbptr], #16]\n"
+    "fmla v6.4s, v26.4s, v12.4s\n"
+    "ldr s27, [x17]\n"
+    "fmla v3.4s, v26.4s, v15.4s\n"
+    "ldr s26, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v2.4s, v20.4s, v9.4s\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "fmla v0.4s, v20.4s, v11.4s\n"
+    "ldr s16, [%[wbptr], #8]\n"
+    "fmla v4.4s, v18.4s, v9.4s\n"
+    "ldr s20, [x14]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "fmla v0.4s, v18.4s, v13.4s\n"
+    "ldr s11, [%[wbptr], #28]\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "movi v30.16b, #0\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "fmla v0.4s, v22.4s, v15.4s\n"
+    "ldr s13, [%[wbptr], #20]\n"
+    "fmov v29.4s, #6.0\n"
+    "fmax v8.4s, v8.4s, v30.4s\n"
+    "fmla v3.4s, v24.4s, v9.4s\n"
+    "fmax v7.4s, v7.4s, v30.4s\n"
+    "fmla v0.4s, v23.4s, v10.4s\n"
+    "ldr s15, [%[wbptr], #12]\n"
+    "fmin v8.4s, v8.4s, v29.4s\n"
+    "ldr s22, [x17, %[input_col_stride1]]\n"
+    "fmin v7.4s, v7.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v30.4s\n"
+    "str s8, [%[outptr0]]\n"
+    "fmla v0.4s, v24.4s, v12.4s\n"
+    "str s7, [%[outptr0], %[output_col_stride1]]\n"
+    "fmin v6.4s, v6.4s, v29.4s\n"
+    "fmax v5.4s, v5.4s, v30.4s\n"
+    "ldr s10, [%[wbptr], #32]\n"
+    "str s6, [%[outptr0], x27]\n"
+    "fmla v0.4s, v21.4s, v9.4s\n"
+    "fmin v5.4s, v5.4s, v29.4s\n"
+    "ldr s12, [%[wbptr], #24]\n"
+    "fmax v4.4s, v4.4s, v30.4s\n"
+    "ldr s28, [%[inptr0], x18]\n"
+    "str s5, [x25]\n"
+    "fmax v3.4s, v3.4s, v30.4s\n"
+    "fmin v4.4s, v4.4s, v29.4s\n"
+    "ldr s9, [%[wbptr], #36]\n"
+    "fmin v3.4s, v3.4s, v29.4s\n"
+    "ldr s23, [x21]\n"
+    "str s4, [x25, %[output_col_stride1]]\n"
+    "fmax v2.4s, v2.4s, v30.4s\n"
+    "str s3, [x25, x27]\n"
+    "fmax v1.4s, v1.4s, v30.4s\n"
+    "fmin v2.4s, v2.4s, v29.4s\n"
+    "ldr s18, [x14, %[input_col_stride1]]\n"
+    "fmin v1.4s, v1.4s, v29.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "str s2, [x13]\n"
+    "fmax v0.4s, v0.4s, v30.4s\n"
+    "str s1, [x13, %[output_col_stride1]]\n"
+    "mov v8.16b, v19.16b\n"
+    "fmin v0.4s, v0.4s, v29.4s\n"
+    "add x25, x25, #4\n"
+    "mov v5.16b, v19.16b\n"
+    "mov v7.16b, v19.16b\n"
+    "str s0, [x13, x27]\n"
+    "mov v2.16b, v19.16b\n"
+    "mov v4.16b, v19.16b\n"
+    "add x13, x13, #4\n"
+    "mov v6.16b, v19.16b\n"
+    "mov v1.16b, v19.16b\n"
+    "mov v3.16b, v19.16b\n"
+    "mov v0.16b, v19.16b\n"
+    "fmla v8.4s, v25.4s, v17.4s\n"
+    "fmla v8.4s, v27.4s, v14.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v5.4s, v27.4s, v17.4s\n"
+    "ldr s27, [x17, x18]\n"
+    "fmla v8.4s, v26.4s, v16.4s\n"
+    "ldr s30, [%[inptr0], x15]\n"
+    "fmla v7.4s, v26.4s, v17.4s\n"
+    "ldr s31, [x26]\n"
+    "fmla v5.4s, v20.4s, v14.4s\n"
+    "ldr s24, [x21, %[input_col_stride1]]\n"
+    "fmla v8.4s, v20.4s, v11.4s\n"
+    "prfm pldl1keep, [x17, x22]\n"
+    "fmla v2.4s, v20.4s, v17.4s\n"
+    "ldr s29, [x14, x18]\n"
+    "fmla v5.4s, v22.4s, v16.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v8.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "fmla v7.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x21, x28]\n"
+    "fmla v4.4s, v22.4s, v17.4s\n"
+    "ldr s21, [x17, x15]\n"
+    "fmla v8.4s, v28.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x22]\n"
+    "fmla v7.4s, v28.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x16]\n"
+    "fmla v6.4s, v28.4s, v17.4s\n"
+    "ldr s19, [%[inptr0], x24]\n"
+    "fmla v5.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [%[inptr0], x23]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "ldr s28, [x26, %[input_col_stride1]]\n"
+    "fmla v8.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x26, x28]\n"
+    "fmla v5.4s, v18.4s, v13.4s\n"
+    "prfm pldl1keep, [x21, x22]\n"
+    "fmla v7.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x14, x16]\n"
+    "fmla v2.4s, v18.4s, v16.4s\n"
+    "prfm pldl1keep, [x17, x23]\n"
+    "fmla v4.4s, v18.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x22]\n"
+    "fmla v1.4s, v18.4s, v17.4s\n"
+    "ldr s25, [x21, x18]\n"
+    "fmla v8.4s, v27.4s, v12.4s\n"
+    "prfm pldl1keep, [x21, x16]\n"
+    "fmla v5.4s, v27.4s, v15.4s\n"
+    "prfm pldl1keep, [x14, x23]\n"
+    "fmla v7.4s, v27.4s, v13.4s\n"
+    "prfm pldl1keep, [x26, x16]\n"
+    "fmla v4.4s, v27.4s, v16.4s\n"
+    "prfm pldl1keep, [x21, x23]\n"
+    "fmla v6.4s, v27.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x23]\n"
+    "fmla v3.4s, v27.4s, v17.4s\n"
+    "ldr s27, [x14, x15]\n"
+    "fmla v7.4s, v30.4s, v15.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v6.4s, v30.4s, v16.4s\n"
+    "ldr s26, [x17, x24]\n"
+    "fmla v2.4s, v31.4s, v11.4s\n"
+    "ldr s20, [x26, x18]\n"
+    "fmla v5.4s, v24.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v4.4s, v24.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v2.4s, v24.4s, v13.4s\n"
+    "add x17, x17, #4\n"
+    "fmla v1.4s, v24.4s, v14.4s\n"
+    "ldr s18, [x21, x15]\n"
+    "fmla v8.4s, v29.4s, v9.4s\n"
+    "fmla v5.4s, v29.4s, v12.4s\n"
+    "fmla v7.4s, v29.4s, v10.4s\n"
+    "fmla v2.4s, v29.4s, v15.4s\n"
+    "fmla v4.4s, v29.4s, v13.4s\n"
+    "fmla v6.4s, v29.4s, v11.4s\n"
+    "fmla v1.4s, v29.4s, v16.4s\n"
+    "fmla v3.4s, v29.4s, v14.4s\n"
+    "fmla v0.4s, v29.4s, v17.4s\n"
+    "ldr s22, [x14, x24]\n"
+    "fmla v7.4s, v21.4s, v12.4s\n"
+    "ldr s23, [x26, x15]\n"
+    "fmla v4.4s, v21.4s, v15.4s\n"
+    "add x14, x14, #4\n"
+    "fmla v6.4s, v21.4s, v13.4s\n"
+    "fmla v3.4s, v21.4s, v16.4s\n"
+    "fmla v2.4s, v28.4s, v10.4s\n"
+    "ldr s24, [x21, x24]\n"
+    "fmla v1.4s, v28.4s, v11.4s\n"
+    "ldr s21, [x26, x24]\n"
+    "fmla v6.4s, v19.4s, v15.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v5.4s, v25.4s, v9.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v2.4s, v25.4s, v12.4s\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "fmla v1.4s, v25.4s, v13.4s\n"
+    "fmla v3.4s, v25.4s, v11.4s\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "fmla v7.4s, v27.4s, v9.4s\n"
+    "fmla v4.4s, v27.4s, v12.4s\n"
+    "fmla v6.4s, v27.4s, v10.4s\n"
+    "fmla v1.4s, v27.4s, v15.4s\n"
+    "fmla v3.4s, v27.4s, v13.4s\n"
+    "fmla v0.4s, v27.4s, v16.4s\n"
+    "fmla v2.4s, v20.4s, v9.4s\n"
+    "fmla v6.4s, v26.4s, v12.4s\n"
+    "fmla v4.4s, v18.4s, v9.4s\n"
+    "fmla v3.4s, v26.4s, v15.4s\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "fmla v0.4s, v20.4s, v11.4s\n"
+    "movi v30.16b, #0\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "fmov v29.4s, #6.0\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "fmla v0.4s, v18.4s, v13.4s\n"
+    "fmax v8.4s, v8.4s, v30.4s\n"
+    "fmax v7.4s, v7.4s, v30.4s\n"
+    "fmax v6.4s, v6.4s, v30.4s\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "fmla v0.4s, v22.4s, v15.4s\n"
+    "fmin v8.4s, v8.4s, v29.4s\n"
+    "fmin v7.4s, v7.4s, v29.4s\n"
+    "fmin v6.4s, v6.4s, v29.4s\n"
+    "str s8, [%[outptr0]]\n"
+    "fmla v3.4s, v24.4s, v9.4s\n"
+    "str s7, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v0.4s, v23.4s, v10.4s\n"
+    "str s6, [%[outptr0], x27]\n"
+    "fmax v5.4s, v5.4s, v30.4s\n"
+    "fmax v4.4s, v4.4s, v30.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v0.4s, v24.4s, v12.4s\n"
+    "fmin v5.4s, v5.4s, v29.4s\n"
+    "fmin v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v30.4s\n"
+    "str s5, [x25]\n"
+    "fmax v2.4s, v2.4s, v30.4s\n"
+    "str s4, [x25, %[output_col_stride1]]\n"
+    "fmla v0.4s, v21.4s, v9.4s\n"
+    "fmin v3.4s, v3.4s, v29.4s\n"
+    "fmin v2.4s, v2.4s, v29.4s\n"
+    "fmax v1.4s, v1.4s, v30.4s\n"
+    "str s3, [x25, x27]\n"
+    "str s2, [x13]\n"
+    "fmin v1.4s, v1.4s, v29.4s\n"
+    "fmax v0.4s, v0.4s, v30.4s\n"
+    "add x25, x25, #4\n"
+    "str s1, [x13, %[output_col_stride1]]\n"
+    "fmin v0.4s, v0.4s, v29.4s\n"
+    "str s0, [x13, x27]\n"
+    "add x13, x13, #4\n"
+    "7:\n"
+    : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr)
+    : [input_col_stride1] "r" (input_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels)
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
+  );
 }
 
 #endif  // __aarch64__
 
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
+template class DepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float, float>;
 
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-  ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-  ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float>;
 }  // namespace depthwise
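For context on the pattern above: each ReLU6 tile accumulates its outputs with fmla chains, then clamps every accumulator to [0, 6] before the stores (the movi v30.16b, #0 / fmov v29.4s, #6.0 pair followed by fmax/fmin on each output register). Below is a minimal C++ sketch of that clamp using NEON intrinsics; it is illustrative only, the shipped kernels stay in hand-scheduled assembly.

#include <arm_neon.h>

// ReLU6 on one vector of four fp32 accumulators, mirroring the
// fmax-against-zero / fmin-against-six pairs in the assembly tiles above.
static inline float32x4_t relu6_q(float32x4_t acc)
{
    const float32x4_t zero = vdupq_n_f32(0.0f); // movi v30.16b, #0
    const float32x4_t six  = vdupq_n_f32(6.0f); // fmov v29.4s, #6.0
    return vminq_f32(vmaxq_f32(acc, zero), six);
}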
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp16_fp16.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp16_fp16.cpp
new file mode 100644
index 0000000..8348692
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp16_fp16.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "impl_fp16_fp16.hpp"
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+namespace depthwise
+{
+template class DepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>;
+}  // namespace depthwise
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
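The new fp16 translation units all follow this shape: include the generic implementation header, then explicitly instantiate one (kernel, stride) configuration, guarded so the file compiles to nothing on toolchains without vector fp16 arithmetic. Code that may or may not carry these kernels can key off the same macro; a trivial sketch follows (the helper name is ours, not part of the library):

// Hypothetical helper: true when the fp16 depthwise kernels above were built.
static inline bool fp16_depthwise_built()
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    return true;
#else
    return false;
#endif
}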
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp
index c7113d0..adc6969 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,599 +25,745 @@
 
 namespace depthwise
 {
-using Conv = DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<3, 3, 3, 3, 2, 2, float, float>;
+
+using namespace neon_convolution_kernels;
+using Conv = DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>;
 
 #ifdef __aarch64__
-
 template <>
 template <>
-void ConvImpl::process_tile<true, 0, 0, 0, 0, 0, 0>(
-  const int n_channels,
-  const float* const weights,
-  const int weight_row_stride,
-  const int weight_col_stride,
-  const float* const inptr,
-  const int in_row_stride,
-  const int in_col_stride,
-  float* const outptr,
-  const int out_row_stride,
-  const int out_col_stride,
-  const int, const int, const int, const int, const int, const int, const int, const int
+void Conv::execute_tile<ActivationFunction::None>(
+  int n_channels,
+  const void* weight_bias_ptr,
+  const float* input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float* output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
 )
 {
-  // Copy pointers
-  const float *uptr0 = inptr;
-  const float *wptr0 = weights;
-  float *vptr0 = outptr;
-
-  int channels_remaining = n_channels;
-  if (channels_remaining >= 4)
-  {
-    // Process blocks of 4 channels at a time
-    int n_iters = channels_remaining / 4 - 1;
-    channels_remaining %= 4;
-
-    asm volatile(
-        // Prepare aliases
-        "qW13 .req q0\n" "vW13 .req v0\n"
-        "qU15 .req q1\n" "qU73 .req q1\n" "qU45 .req q1\n" "qU14 .req q1\n"
-        "vU15 .req v1\n" "vU73 .req v1\n" "vU45 .req v1\n" "vU14 .req v1\n"
-        "qU62 .req q2\n" "qV12 .req q2\n" "vU62 .req v2\n" "vV12 .req v2\n"
-        "qU51 .req q3\n" "qU43 .req q3\n" "qU55 .req q3\n"
-        "vU51 .req v3\n" "vU43 .req v3\n" "vU55 .req v3\n"
-        "qU77 .req q4\n" "qV13 .req q4\n" "qV31 .req q4\n" "qU44 .req q4\n"
-        "vU77 .req v4\n" "vV13 .req v4\n" "vV31 .req v4\n" "vU44 .req v4\n"
-        "qV33 .req q5\n" "qU46 .req q5\n" "qU11 .req q5\n" "qU37 .req q5\n"
-        "vV33 .req v5\n" "vU46 .req v5\n" "vU11 .req v5\n" "vU37 .req v5\n"
-        "qU56 .req q6\n" "qU25 .req q6\n" "qU32 .req q6\n"
-        "vU56 .req v6\n" "vU25 .req v6\n" "vU32 .req v6\n"
-        "qU72 .req q7\n" "qV22 .req q7\n" "vU72 .req v7\n" "vV22 .req v7\n"
-        "qU67 .req q8\n" "qU61 .req q8\n" "qU13 .req q8\n"
-        "vU67 .req v8\n" "vU61 .req v8\n" "vU13 .req v8\n"
-        "qU74 .req q9\n" "qU34 .req q9\n" "qU17 .req q9\n" "qU66 .req q9\n"
-        "vU74 .req v9\n" "vU34 .req v9\n" "vU17 .req v9\n" "vU66 .req v9\n"
-        "qU33 .req q10\n" "qU57 .req q10\n" "qU21 .req q10\n"
-        "vU33 .req v10\n" "vU57 .req v10\n" "vU21 .req v10\n" "qW23 .req q11\n"
-        "vW23 .req v11\n" "qU42 .req q12\n" "qV23 .req q12\n" "qU23 .req q12\n"
-        "vU42 .req v12\n" "vV23 .req v12\n" "vU23 .req v12\n"
-        "qW33 .req q13\n" "vW33 .req v13\n"
-        "qU76 .req q14\n" "qU47 .req q14\n" "qU64 .req q14\n" "qU41 .req q14\n"
-        "vU76 .req v14\n" "vU47 .req v14\n" "vU64 .req v14\n" "vU41 .req v14\n"
-        "qU52 .req q15\n" "qU54 .req q15\n" "qU75 .req q15\n" "qU26 .req q15\n"
-        "vU52 .req v15\n" "vU54 .req v15\n" "vU75 .req v15\n" "vU26 .req v15\n"
-        "qU53 .req q16\n" "qU27 .req q16\n" "vU53 .req v16\n" "vU27 .req v16\n"
-        "qV21 .req q17\n" "qU65 .req q17\n" "vV21 .req v17\n" "vU65 .req v17\n"
-        "qU31 .req q18\n" "qU24 .req q18\n" "qU36 .req q18\n"
-        "vU31 .req v18\n" "vU24 .req v18\n" "vU36 .req v18\n" "qU22 .req q19\n"
-        "vU22 .req v19\n" "qU35 .req q20\n" "qU63 .req q20\n"
-        "vU35 .req v20\n" "vU63 .req v20\n" "qW12 .req q21\n"
-        "vW12 .req v21\n" "qV32 .req q22\n" "qU16 .req q22\n"
-        "vV32 .req v22\n" "vU16 .req v22\n" "qW11 .req q23\n" "vW11 .req v23\n"
-        "qU12 .req q24\n" "vU12 .req v24\n" "qW31 .req q25\n" "vW31 .req v25\n"
-        "qW22 .req q26\n" "vW22 .req v26\n" "qU71 .req q27\n" "vU71 .req v27\n"
-        "qV11 .req q28\n" "vV11 .req v28\n" "qW21 .req q29\n" "vW21 .req v29\n"
-        "qW32 .req q30\n" "vW32 .req v30\n"
-
-        "uptr1 .req x0\n"
-        "uptr2 .req x1\n"
-        "uptr3 .req x2\n"
-        "uptr4 .req x3\n"
-        "uptr5 .req x4\n"
-        "uptr6 .req x5\n"
-        "u_col_stride1 .req %x[u_col_stride]\n"
-        "u_col_stride2 .req  x6\n"
-        "u_col_stride3 .req  x7\n"
-        "u_col_stride4 .req  x8\n"
-        "u_col_stride5 .req  x9\n"
-        "u_col_stride6 .req x10\n"
-        "wptr1 .req x11\n"
-        "wptr2 .req x12\n"
-        "w_col_stride1 .req %x[w_col_stride]\n"
-        "w_col_stride2 .req x13\n"
-        "vptr1 .req x14\n"
-        "vptr2 .req x15\n"
-        "v_col_stride1 .req %x[v_col_stride]\n"
-        "v_col_stride2 .req x16\n"
-
-        // Prepare strides and pointers
-        "add uptr1, %x[uptr0], %x[u_row_stride]\n"
-        "add uptr2,    uptr1 , %x[u_row_stride]\n"
-        "add uptr3,    uptr2 , %x[u_row_stride]\n"
-        "add uptr4,    uptr3 , %x[u_row_stride]\n"
-        "add uptr5,    uptr4 , %x[u_row_stride]\n"
-        "add uptr6,    uptr5 , %x[u_row_stride]\n"
-        "add u_col_stride2, u_col_stride1, u_col_stride1\n"
-        "add u_col_stride3, u_col_stride2, u_col_stride1\n"
-        "add u_col_stride4, u_col_stride3, u_col_stride1\n"
-        "add u_col_stride5, u_col_stride4, u_col_stride1\n"
-        "add u_col_stride6, u_col_stride5, u_col_stride1\n"
-
-        "add wptr1, %x[wptr0], %x[w_row_stride]\n"
-        "add wptr2,    wptr1 , %x[w_row_stride]\n"
-        "add w_col_stride2, w_col_stride1, w_col_stride1\n"
-
-        "add vptr1, %x[vptr0], %x[v_row_stride]\n"
-        "add vptr2,    vptr1 , %x[v_row_stride]\n"
-        "add v_col_stride2, v_col_stride1, v_col_stride1\n"
-
-        // Prepare for first iteration
-        "ldr qW13, [%x[wptr0], w_col_stride2]\n"
-        "ldr qW23, [wptr1, w_col_stride2]\n"
-        "ldr qW33, [wptr2, w_col_stride2]\n"
-        "ldr qW12, [%x[wptr0], w_col_stride1]\n"
-        "ldr qW22, [wptr1, w_col_stride1]\n"
-        "ldr qW32, [wptr2, w_col_stride1]\n"
-        "ldr qW11, [%x[wptr0]], #0x10\n"
-        "ldr qW21, [wptr1], #0x10\n"
-        "ldr qU17, [%x[uptr0], u_col_stride6]\n"
-        "ldr qU15, [%x[uptr0], u_col_stride4]\n"
-        "ldr qU16, [%x[uptr0], u_col_stride5]\n"
-        "ldr qU37, [uptr2, u_col_stride6]\n"
-        "ldr qU35, [uptr2, u_col_stride4]\n"
-        "ldr qU36, [uptr2, u_col_stride5]\n"
-        "ldr qU27, [uptr1, u_col_stride6]\n"
-        "ldr qU25, [uptr1, u_col_stride4]\n"
-        "fmul vV13.4s, vU17.4s, vW13.4s\n"
-        "fmul vV12.4s, vU15.4s, vW13.4s\n"
-        "fmla vV13.4s, vU15.4s, vW11.4s\n"
-        "ldr qW31, [wptr2], #0x10\n"
-        "fmla vV13.4s, vU16.4s, vW12.4s\n"
-        "ldr qU26, [uptr1, u_col_stride5]\n"
-        "fmla vV13.4s, vU37.4s, vW33.4s\n"
-        "ldr qU47, [uptr3, u_col_stride6]\n"
-        "fmul vV23.4s, vU37.4s, vW13.4s\n"
-        "ldr qU45, [uptr3, u_col_stride4]\n"
-        "fmla vV12.4s, vU35.4s, vW33.4s\n"
-        "ldr qU46, [uptr3, u_col_stride5]\n"
-        "fmla vV13.4s, vU35.4s, vW31.4s\n"
-        "ldr qU67, [uptr5, u_col_stride6]\n"
-        "fmul vV22.4s, vU35.4s, vW13.4s\n"
-        "cbz %x[n_iters], 2f\n"  // Jump to tail if no iterations
-
-        "1:"  // Loop body
-        "fmla vV23.4s, vU35.4s, vW11.4s\n"
-        "ldr qU65, [uptr5, u_col_stride4]\n"
-        "fmla vV13.4s, vU36.4s, vW32.4s\n"
-        "fmla vV23.4s, vU36.4s, vW12.4s\n"
-        "ldr qU66, [uptr5, u_col_stride5]\n"
-        "fmla vV13.4s, vU27.4s, vW23.4s\n"
-        "ldr qU57, [uptr4, u_col_stride6]\n"
-        "fmla vV12.4s, vU25.4s, vW23.4s\n"
-        "ldr qU55, [uptr4, u_col_stride4]\n"
-        "fmla vV13.4s, vU25.4s, vW21.4s\n"
-        "ldr qU56, [uptr4, u_col_stride5]\n"
-        "fmla vV13.4s, vU26.4s, vW22.4s\n"
-        "str qV13, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV23.4s, vU47.4s, vW23.4s\n"
-        "ldr qU77, [uptr6, u_col_stride6]\n"
-        "fmla vV22.4s, vU45.4s, vW23.4s\n"
-        "fmla vV23.4s, vU45.4s, vW21.4s\n"
-        "ldr qU75, [uptr6, u_col_stride4]\n"
-        "fmla vV23.4s, vU46.4s, vW22.4s\n"
-        "ldr qU76, [uptr6, u_col_stride5]\n"
-        "fmul vV33.4s, vU67.4s, vW23.4s\n"
-        "ldr qU14, [%x[uptr0], u_col_stride3]\n"
-        "fmul vV32.4s, vU65.4s, vW23.4s\n"
-        "fmla vV33.4s, vU65.4s, vW21.4s\n"
-        "ldr qU13, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV33.4s, vU66.4s, vW22.4s\n"
-        "ldr qU34, [uptr2, u_col_stride3]\n"
-        "fmla vV23.4s, vU57.4s, vW33.4s\n"
-        "fmla vV33.4s, vU57.4s, vW13.4s\n"
-        "ldr qU33, [uptr2, u_col_stride2]\n"
-        "fmla vV22.4s, vU55.4s, vW33.4s\n"
-        "fmla vV23.4s, vU55.4s, vW31.4s\n"
-        "fmla vV32.4s, vU55.4s, vW13.4s\n"
-        "fmla vV33.4s, vU55.4s, vW11.4s\n"
-        "ldr qU24, [uptr1, u_col_stride3]\n"
-        "fmla vV23.4s, vU56.4s, vW32.4s\n"
-        "str qV23, [vptr1, v_col_stride2]\n"
-        "fmla vV33.4s, vU56.4s, vW12.4s\n"
-        "ldr qU23, [uptr1, u_col_stride2]\n"
-        "fmla vV33.4s, vU77.4s, vW33.4s\n"
-        "ldr qU44, [uptr3, u_col_stride3]\n"
-        "fmla vV32.4s, vU75.4s, vW33.4s\n"
-        "fmla vV33.4s, vU75.4s, vW31.4s\n"
-        "ldr qU43, [uptr3, u_col_stride2]\n"
-        "fmla vV33.4s, vU76.4s, vW32.4s\n"
-        "str qV33, [vptr2, v_col_stride2]\n"
-        "ldr qU64, [uptr5, u_col_stride3]\n"
-        "fmla vV12.4s, vU14.4s, vW12.4s\n"
-        "ldr qU63, [uptr5, u_col_stride2]\n"
-        "fmul vV11.4s, vU13.4s, vW13.4s\n"
-        "fmla vV12.4s, vU13.4s, vW11.4s\n"
-        "ldr qU54, [uptr4, u_col_stride3]\n"
-        "fmla vV12.4s, vU34.4s, vW32.4s\n"
-        "fmla vV22.4s, vU34.4s, vW12.4s\n"
-        "ldr qU53, [uptr4, u_col_stride2]\n"
-        "fmla vV11.4s, vU33.4s, vW33.4s\n"
-        "ldr qU74, [uptr6, u_col_stride3]\n"
-        "fmla vV12.4s, vU33.4s, vW31.4s\n"
-        "ldr qU73, [uptr6, u_col_stride2]\n"
-        "fmul vV21.4s, vU33.4s, vW13.4s\n"
-        "ldr qU12, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV22.4s, vU33.4s, vW11.4s\n"
-        "ldr qU11, [%x[uptr0]], #0x10\n"
-        "fmla vV12.4s, vU24.4s, vW22.4s\n"
-        "ldr qU32, [uptr2, u_col_stride1]\n"
-        "fmla vV11.4s, vU23.4s, vW23.4s\n"
-        "ldr qU31, [uptr2], #0x10\n"
-        "fmla vV12.4s, vU23.4s, vW21.4s\n"
-        "str qV12, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV22.4s, vU44.4s, vW22.4s\n"
-        "ldr qU22, [uptr1, u_col_stride1]\n"
-        "fmla vV21.4s, vU43.4s, vW23.4s\n"
-        "ldr qU21, [uptr1], #0x10\n"
-        "fmla vV22.4s, vU43.4s, vW21.4s\n"
-        "ldr qU42, [uptr3, u_col_stride1]\n"
-        "fmla vV32.4s, vU64.4s, vW22.4s\n"
-        "ldr qU41, [uptr3], #0x10\n"
-        "fmul vV31.4s, vU63.4s, vW23.4s\n"
-        "ldr qW23, [wptr1, w_col_stride2]\n"
-        "fmla vV32.4s, vU63.4s, vW21.4s\n"
-        "ldr qU62, [uptr5, u_col_stride1]\n"
-        "fmla vV22.4s, vU54.4s, vW32.4s\n"
-        "ldr qU61, [uptr5], #0x10\n"
-        "fmla vV32.4s, vU54.4s, vW12.4s\n"
-        "ldr qU52, [uptr4, u_col_stride1]\n"
-        "fmla vV21.4s, vU53.4s, vW33.4s\n"
-        "ldr qU51, [uptr4], #0x10\n"
-        "fmla vV22.4s, vU53.4s, vW31.4s\n"
-        "str qV22, [vptr1, v_col_stride1]\n"
-        "fmla vV31.4s, vU53.4s, vW13.4s\n"
-        "ldr qW13, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV32.4s, vU53.4s, vW11.4s\n"
-        "ldr qU72, [uptr6, u_col_stride1]\n"
-        "fmla vV32.4s, vU74.4s, vW32.4s\n"
-        "ldr qU71, [uptr6], #0x10\n"
-        "fmla vV31.4s, vU73.4s, vW33.4s\n"
-        "ldr qW33, [wptr2, w_col_stride2]\n"
-        "fmla vV32.4s, vU73.4s, vW31.4s\n"
-        "str qV32, [vptr2, v_col_stride1]\n"
-        "fmla vV11.4s, vU12.4s, vW12.4s\n"
-        "ldr qU17, [%x[uptr0], u_col_stride6]\n"
-        "fmla vV11.4s, vU11.4s, vW11.4s\n"
-        "ldr qU15, [%x[uptr0], u_col_stride4]\n"
-        "fmla vV11.4s, vU32.4s, vW32.4s\n"
-        "ldr qU16, [%x[uptr0], u_col_stride5]\n"
-        "fmla vV21.4s, vU32.4s, vW12.4s\n"
-        "ldr qU37, [uptr2, u_col_stride6]\n"
-        "fmla vV11.4s, vU31.4s, vW31.4s\n"
-        "ldr qU35, [uptr2, u_col_stride4]\n"
-        "fmla vV21.4s, vU31.4s, vW11.4s\n"
-        "ldr qU36, [uptr2, u_col_stride5]\n"
-        "fmla vV11.4s, vU22.4s, vW22.4s\n"
-        "ldr qU27, [uptr1, u_col_stride6]\n"
-        "fmla vV11.4s, vU21.4s, vW21.4s\n"
-        "str qV11, [%x[vptr0]], #0x10\n"
-        "fmla vV21.4s, vU42.4s, vW22.4s\n"
-        "ldr qU25, [uptr1, u_col_stride4]\n"
-        "fmla vV21.4s, vU41.4s, vW21.4s\n"
-        "fmla vV31.4s, vU62.4s, vW22.4s\n"
-        "ldr qW22, [wptr1, w_col_stride1]\n"
-        "fmla vV31.4s, vU61.4s, vW21.4s\n"
-        "ldr qW21, [wptr1], #0x10\n"
-        "fmla vV21.4s, vU52.4s, vW32.4s\n"
-        "fmla vV31.4s, vU52.4s, vW12.4s\n"
-        "ldr qW12, [%x[wptr0], w_col_stride1]\n"
-        "fmla vV21.4s, vU51.4s, vW31.4s\n"
-        "str qV21, [vptr1], #0x10\n"
-        "fmla vV31.4s, vU51.4s, vW11.4s\n"
-        "ldr qW11, [%x[wptr0]], #0x10\n"
-        "fmla vV31.4s, vU72.4s, vW32.4s\n"
-        "ldr qW32, [wptr2, w_col_stride1]\n"
-        "fmla vV31.4s, vU71.4s, vW31.4s\n"
-        "str qV31, [vptr2], #0x10\n"
-        "fmul vV13.4s, vU17.4s, vW13.4s\n"
-        "fmul vV12.4s, vU15.4s, vW13.4s\n"
-        "subs %x[n_iters], %x[n_iters], #1\n"
-        "fmla vV13.4s, vU15.4s, vW11.4s\n"
-        "ldr qW31, [wptr2], #0x10\n"
-        "fmla vV13.4s, vU16.4s, vW12.4s\n"
-        "ldr qU26, [uptr1, u_col_stride5]\n"
-        "fmla vV13.4s, vU37.4s, vW33.4s\n"
-        "ldr qU47, [uptr3, u_col_stride6]\n"
-        "fmul vV23.4s, vU37.4s, vW13.4s\n"
-        "ldr qU45, [uptr3, u_col_stride4]\n"
-        "fmla vV12.4s, vU35.4s, vW33.4s\n"
-        "ldr qU46, [uptr3, u_col_stride5]\n"
-        "fmla vV13.4s, vU35.4s, vW31.4s\n"
-        "ldr qU67, [uptr5, u_col_stride6]\n"
-        "fmul vV22.4s, vU35.4s, vW13.4s\n"
-        "bne 1b\n"
-
-        "2:"  // Tail iteration
-        "fmla vV23.4s, vU35.4s, vW11.4s\n"
-        "ldr qU65, [uptr5, u_col_stride4]\n"
-        "fmla vV13.4s, vU36.4s, vW32.4s\n"
-        "fmla vV23.4s, vU36.4s, vW12.4s\n"
-        "ldr qU66, [uptr5, u_col_stride5]\n"
-        "fmla vV13.4s, vU27.4s, vW23.4s\n"
-        "ldr qU57, [uptr4, u_col_stride6]\n"
-        "fmla vV12.4s, vU25.4s, vW23.4s\n"
-        "ldr qU55, [uptr4, u_col_stride4]\n"
-        "fmla vV13.4s, vU25.4s, vW21.4s\n"
-        "ldr qU56, [uptr4, u_col_stride5]\n"
-        "fmla vV13.4s, vU26.4s, vW22.4s\n"
-        "str qV13, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV23.4s, vU47.4s, vW23.4s\n"
-        "ldr qU77, [uptr6, u_col_stride6]\n"
-        "fmla vV22.4s, vU45.4s, vW23.4s\n"
-        "fmla vV23.4s, vU45.4s, vW21.4s\n"
-        "ldr qU75, [uptr6, u_col_stride4]\n"
-        "fmla vV23.4s, vU46.4s, vW22.4s\n"
-        "ldr qU76, [uptr6, u_col_stride5]\n"
-        "fmul vV33.4s, vU67.4s, vW23.4s\n"
-        "ldr qU14, [%x[uptr0], u_col_stride3]\n"
-        "fmul vV32.4s, vU65.4s, vW23.4s\n"
-        "fmla vV33.4s, vU65.4s, vW21.4s\n"
-        "ldr qU13, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV33.4s, vU66.4s, vW22.4s\n"
-        "ldr qU34, [uptr2, u_col_stride3]\n"
-        "fmla vV23.4s, vU57.4s, vW33.4s\n"
-        "fmla vV33.4s, vU57.4s, vW13.4s\n"
-        "ldr qU33, [uptr2, u_col_stride2]\n"
-        "fmla vV22.4s, vU55.4s, vW33.4s\n"
-        "fmla vV23.4s, vU55.4s, vW31.4s\n"
-        "fmla vV32.4s, vU55.4s, vW13.4s\n"
-        "fmla vV33.4s, vU55.4s, vW11.4s\n"
-        "ldr qU24, [uptr1, u_col_stride3]\n"
-        "fmla vV23.4s, vU56.4s, vW32.4s\n"
-        "str qV23, [vptr1, v_col_stride2]\n"
-        "fmla vV33.4s, vU56.4s, vW12.4s\n"
-        "ldr qU23, [uptr1, u_col_stride2]\n"
-        "fmla vV33.4s, vU77.4s, vW33.4s\n"
-        "ldr qU44, [uptr3, u_col_stride3]\n"
-        "fmla vV32.4s, vU75.4s, vW33.4s\n"
-        "fmla vV33.4s, vU75.4s, vW31.4s\n"
-        "ldr qU43, [uptr3, u_col_stride2]\n"
-        "fmla vV33.4s, vU76.4s, vW32.4s\n"
-        "str qV33, [vptr2, v_col_stride2]\n"
-        "ldr qU64, [uptr5, u_col_stride3]\n"
-        "fmla vV12.4s, vU14.4s, vW12.4s\n"
-        "ldr qU63, [uptr5, u_col_stride2]\n"
-        "fmul vV11.4s, vU13.4s, vW13.4s\n"
-        "fmla vV12.4s, vU13.4s, vW11.4s\n"
-        "ldr qU54, [uptr4, u_col_stride3]\n"
-        "fmla vV12.4s, vU34.4s, vW32.4s\n"
-        "fmla vV22.4s, vU34.4s, vW12.4s\n"
-        "ldr qU53, [uptr4, u_col_stride2]\n"
-        "fmla vV11.4s, vU33.4s, vW33.4s\n"
-        "ldr qU74, [uptr6, u_col_stride3]\n"
-        "fmla vV12.4s, vU33.4s, vW31.4s\n"
-        "ldr qU73, [uptr6, u_col_stride2]\n"
-        "fmul vV21.4s, vU33.4s, vW13.4s\n"
-        "ldr qU12, [%x[uptr0], u_col_stride1]\n"
-        "fmla vV22.4s, vU33.4s, vW11.4s\n"
-        "ldr qU11, [%x[uptr0]], #0x10\n"
-        "fmla vV12.4s, vU24.4s, vW22.4s\n"
-        "ldr qU32, [uptr2, u_col_stride1]\n"
-        "fmla vV11.4s, vU23.4s, vW23.4s\n"
-        "ldr qU31, [uptr2], #0x10\n"
-        "fmla vV12.4s, vU23.4s, vW21.4s\n"
-        "str qV12, [%x[vptr0], v_col_stride1]\n"
-        "fmla vV22.4s, vU44.4s, vW22.4s\n"
-        "ldr qU22, [uptr1, u_col_stride1]\n"
-        "fmla vV21.4s, vU43.4s, vW23.4s\n"
-        "ldr qU21, [uptr1], #0x10\n"
-        "fmla vV22.4s, vU43.4s, vW21.4s\n"
-        "ldr qU42, [uptr3, u_col_stride1]\n"
-        "fmla vV32.4s, vU64.4s, vW22.4s\n"
-        "ldr qU41, [uptr3], #0x10\n"
-        "fmul vV31.4s, vU63.4s, vW23.4s\n"
-        "fmla vV32.4s, vU63.4s, vW21.4s\n"
-        "ldr qU62, [uptr5, u_col_stride1]\n"
-        "fmla vV22.4s, vU54.4s, vW32.4s\n"
-        "ldr qU61, [uptr5], #0x10\n"
-        "fmla vV32.4s, vU54.4s, vW12.4s\n"
-        "ldr qU52, [uptr4, u_col_stride1]\n"
-        "fmla vV21.4s, vU53.4s, vW33.4s\n"
-        "ldr qU51, [uptr4], #0x10\n"
-        "fmla vV22.4s, vU53.4s, vW31.4s\n"
-        "str qV22, [vptr1, v_col_stride1]\n"
-        "fmla vV31.4s, vU53.4s, vW13.4s\n"
-        "fmla vV32.4s, vU53.4s, vW11.4s\n"
-        "ldr qU72, [uptr6, u_col_stride1]\n"
-        "fmla vV32.4s, vU74.4s, vW32.4s\n"
-        "ldr qU71, [uptr6], #0x10\n"
-        "fmla vV31.4s, vU73.4s, vW33.4s\n"
-        "fmla vV32.4s, vU73.4s, vW31.4s\n"
-        "str qV32, [vptr2, v_col_stride1]\n"
-        "fmla vV11.4s, vU12.4s, vW12.4s\n"
-        "fmla vV11.4s, vU11.4s, vW11.4s\n"
-        "fmla vV11.4s, vU32.4s, vW32.4s\n"
-        "fmla vV21.4s, vU32.4s, vW12.4s\n"
-        "fmla vV11.4s, vU31.4s, vW31.4s\n"
-        "fmla vV21.4s, vU31.4s, vW11.4s\n"
-        "fmla vV11.4s, vU22.4s, vW22.4s\n"
-        "fmla vV11.4s, vU21.4s, vW21.4s\n"
-        "str qV11, [%x[vptr0]], #0x10\n"
-        "fmla vV21.4s, vU42.4s, vW22.4s\n"
-        "fmla vV21.4s, vU41.4s, vW21.4s\n"
-        "fmla vV31.4s, vU62.4s, vW22.4s\n"
-        "fmla vV31.4s, vU61.4s, vW21.4s\n"
-        "fmla vV21.4s, vU52.4s, vW32.4s\n"
-        "fmla vV31.4s, vU52.4s, vW12.4s\n"
-        "fmla vV21.4s, vU51.4s, vW31.4s\n"
-        "str qV21, [vptr1], #0x10\n"
-        "fmla vV31.4s, vU51.4s, vW11.4s\n"
-        "fmla vV31.4s, vU72.4s, vW32.4s\n"
-        "fmla vV31.4s, vU71.4s, vW31.4s\n"
-        "str qV31, [vptr2], #0x10\n"
-
-        // Clear aliases
-        ".unreq uptr1\n" ".unreq uptr2\n" ".unreq uptr3\n" ".unreq uptr4\n"
-        ".unreq uptr5\n" ".unreq uptr6\n"
-        ".unreq u_col_stride1\n" ".unreq u_col_stride2\n" ".unreq u_col_stride3\n"
-        ".unreq u_col_stride4\n" ".unreq u_col_stride5\n" ".unreq u_col_stride6\n"
-        ".unreq wptr1\n" ".unreq wptr2\n"
-        ".unreq w_col_stride1\n" ".unreq w_col_stride2\n"
-        ".unreq vptr1\n" ".unreq vptr2\n"
-        ".unreq v_col_stride1\n" ".unreq v_col_stride2\n"
-        ".unreq qU15\n" ".unreq qU73\n" ".unreq qU45\n" ".unreq qU14\n"
-        ".unreq qW13\n" ".unreq qU62\n" ".unreq qV12\n"
-        ".unreq qU51\n" ".unreq qU43\n" ".unreq qU55\n"
-        ".unreq qU77\n" ".unreq qV13\n" ".unreq qV31\n" ".unreq qU44\n"
-        ".unreq qV33\n" ".unreq qU46\n" ".unreq qU11\n" ".unreq qU37\n"
-        ".unreq qU56\n" ".unreq qU25\n" ".unreq qU32\n"
-        ".unreq qU72\n" ".unreq qV22\n"
-        ".unreq qU67\n" ".unreq qU61\n" ".unreq qU13\n" ".unreq qW33\n"
-        ".unreq qU74\n" ".unreq qU34\n" ".unreq qU17\n" ".unreq qU66\n"
-        ".unreq qU33\n" ".unreq qU57\n" ".unreq qU21\n"
-        ".unreq qW23\n" ".unreq qU42\n" ".unreq qV23\n" ".unreq qU23\n"
-        ".unreq qU76\n" ".unreq qU47\n" ".unreq qU64\n" ".unreq qU41\n"
-        ".unreq qU52\n" ".unreq qU54\n" ".unreq qU75\n" ".unreq qU26\n"
-        ".unreq qU53\n" ".unreq qU27\n"
-        ".unreq qV21\n" ".unreq qU65\n"
-        ".unreq qU31\n" ".unreq qU24\n" ".unreq qU36\n" ".unreq qU22\n"
-        ".unreq qU35\n" ".unreq qU63\n" ".unreq qW12\n"
-        ".unreq qV32\n" ".unreq qU16\n" ".unreq qW11\n" ".unreq qU12\n"
-        ".unreq qW31\n" ".unreq qW22\n" ".unreq qU71\n" ".unreq qV11\n"
-        ".unreq qW21\n" ".unreq qW32\n" ".unreq vW13\n"
-        ".unreq vU15\n" ".unreq vU73\n" ".unreq vU45\n" ".unreq vU14\n"
-        ".unreq vU62\n" ".unreq vV12\n"
-        ".unreq vU51\n" ".unreq vU43\n" ".unreq vU55\n"
-        ".unreq vU77\n" ".unreq vV13\n" ".unreq vV31\n" ".unreq vU44\n"
-        ".unreq vV33\n" ".unreq vU46\n" ".unreq vU11\n" ".unreq vU37\n"
-        ".unreq vU56\n" ".unreq vU25\n" ".unreq vU32\n"
-        ".unreq vU72\n" ".unreq vV22\n" ".unreq vW21\n" ".unreq vW32\n"
-        ".unreq vU67\n" ".unreq vU61\n" ".unreq vU13\n"
-        ".unreq vU74\n" ".unreq vU34\n" ".unreq vU17\n" ".unreq vU66\n"
-        ".unreq vU33\n" ".unreq vU57\n" ".unreq vU21\n" ".unreq vW23\n"
-        ".unreq vU42\n" ".unreq vV23\n" ".unreq vU23\n" ".unreq vW33\n"
-        ".unreq vU76\n" ".unreq vU47\n" ".unreq vU64\n" ".unreq vU41\n"
-        ".unreq vU52\n" ".unreq vU54\n" ".unreq vU75\n" ".unreq vU26\n"
-        ".unreq vU53\n" ".unreq vU27\n" ".unreq vV21\n" ".unreq vU65\n"
-        ".unreq vU31\n" ".unreq vU24\n" ".unreq vU36\n" ".unreq vU22\n"
-        ".unreq vU35\n" ".unreq vU63\n" ".unreq vW12\n"
-        ".unreq vV32\n" ".unreq vU16\n" ".unreq vW11\n" ".unreq vU12\n"
-        ".unreq vW31\n" ".unreq vW22\n" ".unreq vU71\n" ".unreq vV11\n"
-        : [uptr0] "+r" (uptr0), [wptr0] "+r" (wptr0), [vptr0] "+r" (vptr0),
-          [n_iters] "+r" (n_iters)
-        : [u_row_stride] "r" (in_row_stride * sizeof(float)),
-          [u_col_stride] "r" (in_col_stride * sizeof(float)),
-          [w_row_stride] "r" (weight_row_stride * sizeof(float)),
-          [w_col_stride] "r" (weight_col_stride * sizeof(float)),
-          [v_row_stride] "r" (out_row_stride * sizeof(float)),
-          [v_col_stride] "r" (out_col_stride * sizeof(float))
-        : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
-          "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
-          "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x0",
-          "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
-          "x12", "x13", "x14", "x15", "x16", "cc", "memory"
-    );
-  }
-  if (channels_remaining)
-  {
-    // Fall back on the unoptimised version to clean up the tail
-    ConvImpl::process_tile<false>(
-        channels_remaining,
-        wptr0, weight_row_stride, weight_col_stride,
-        uptr0, in_row_stride, in_col_stride,
-        vptr0, out_row_stride, out_col_stride,
-        0, 0, 0, 0, 0, 0
-    );
-  }
+  __asm __volatile(
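+    // x15-x20: input row pointers; x26-x28/x13/x14: input column offsets;
+    // x21/x22: output row pointers; x23: third output column offset.
+    // n_channels splits into a vector count (x25) and a remainder (x24).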
+    "add x15, %[inptr0], %[input_row_stride]\n"
+    "add x26, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x21, %[outptr0], %[output_row_stride]\n"
+    "add x16, x15, %[input_row_stride]\n"
+    "add x27, x26, %[input_col_stride1]\n"
+    "add x22, x21, %[output_row_stride]\n"
+    "add x17, x16, %[input_row_stride]\n"
+    "add x28, x27, %[input_col_stride1]\n"
+    "add x23, %[output_col_stride1], %[output_col_stride1]\n"
+    "add x18, x17, %[input_row_stride]\n"
+    "add x13, x28, %[input_col_stride1]\n"
+    "and x24, %[n_channels], #3\n"
+    "add x19, x18, %[input_row_stride]\n"
+    "add x14, x13, %[input_col_stride1]\n"
+    "lsr x25, %[n_channels], #2\n"
+    "add x20, x19, %[input_row_stride]\n"
+    "cbz x25, 4f\n"
+    "1:\n"
+    "ldr q27, [%[wbptr]]\n"
+    "subs x25, x25, #1\n"
+    "mov v17.16b, v27.16b\n"
+    "ldr q6, [%[wbptr], #16]\n"
+    "mov v16.16b, v27.16b\n"
+    "ldr q14, [%[wbptr], #32]\n"
+    "mov v15.16b, v27.16b\n"
+    "ldr q13, [%[wbptr], #48]\n"
+    "mov v2.16b, v27.16b\n"
+    "ldr q12, [%[wbptr], #64]\n"
+    "mov v4.16b, v27.16b\n"
+    "ldr q11, [%[wbptr], #80]\n"
+    "mov v5.16b, v27.16b\n"
+    "ldr q10, [%[wbptr], #96]\n"
+    "mov v1.16b, v27.16b\n"
+    "ldr q9, [%[wbptr], #112]\n"
+    "mov v3.16b, v27.16b\n"
+    "ldr q8, [%[wbptr], #128]\n"
+    "mov v0.16b, v27.16b\n"
+    "ldr q7, [%[wbptr], #144]\n"
+    "ldr q29, [%[inptr0]]\n"
+    "ldr q28, [x15]\n"
+    "ldr q26, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q22, [x16]\n"
+    "ldr q20, [x15, %[input_col_stride1]]\n"
+    "ldr q19, [%[inptr0], x26]\n"
+    "ldr q30, [x17]\n"
+    "ldr q18, [x16, %[input_col_stride1]]\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v17.4s, v29.4s, v6.4s\n"
+    "ldr q21, [x15, x26]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "ldr q27, [%[inptr0], x27]\n"
+    "fmla v15.4s, v19.4s, v6.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v17.4s, v28.4s, v12.4s\n"
+    "ldr q25, [x18]\n"
+    "fmla v16.4s, v30.4s, v12.4s\n"
+    "ldr q24, [x17, %[input_col_stride1]]\n"
+    "fmla v15.4s, v21.4s, v12.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v17.4s, v26.4s, v14.4s\n"
+    "ldr q23, [x16, x26]\n"
+    "fmla v16.4s, v18.4s, v14.4s\n"
+    "subs x25, x25, #1\n"
+    "fmla v15.4s, v27.4s, v14.4s\n"
+    "ldr q26, [x15, x27]\n"
+    "fmla v17.4s, v22.4s, v9.4s\n"
+    "ldr q22, [%[inptr0], x28]\n"
+    "fmla v16.4s, v25.4s, v9.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "fmla v15.4s, v23.4s, v9.4s\n"
+    "ldr q30, [x19]\n"
+    "fmla v17.4s, v20.4s, v11.4s\n"
+    "ldr q29, [x18, %[input_col_stride1]]\n"
+    "fmla v16.4s, v24.4s, v11.4s\n"
+    "ldr q28, [x17, x26]\n"
+    "fmla v4.4s, v23.4s, v6.4s\n"
+    "fmla v15.4s, v26.4s, v11.4s\n"
+    "fmla v17.4s, v19.4s, v13.4s\n"
+    "ldr q24, [x16, x27]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "ldr q25, [x15, x28]\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "fmla v5.4s, v22.4s, v6.4s\n"
+    "fmla v17.4s, v18.4s, v8.4s\n"
+    "ldr q19, [%[inptr0], x13]\n"
+    "fmla v2.4s, v30.4s, v12.4s\n"
+    "ldr q18, [x20]\n"
+    "fmla v16.4s, v29.4s, v8.4s\n"
+    "ldr q22, [x19, %[input_col_stride1]]\n"
+    "fmla v17.4s, v21.4s, v10.4s\n"
+    "ldr q26, [x18, x26]\n"
+    "fmla v2.4s, v29.4s, v14.4s\n"
+    "ldr q20, [x17, x27]\n"
+    "fmla v16.4s, v28.4s, v10.4s\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "fmla v17.4s, v23.4s, v7.4s\n"
+    "ldr q27, [x16, x28]\n"
+    "fmla v15.4s, v24.4s, v8.4s\n"
+    "ldr q30, [x15, x13]\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "ldr q24, [%[inptr0], x14]\n"
+    "str q17, [%[outptr0]]\n"
+    "fmla v5.4s, v25.4s, v12.4s\n"
+    "fmla v15.4s, v25.4s, v10.4s\n"
+    "ldr q28, [x20, %[input_col_stride1]]\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "ldr q17, [x19, x26]\n"
+    "fmla v5.4s, v19.4s, v14.4s\n"
+    "ldr q18, [x18, x27]\n"
+    "fmla v16.4s, v26.4s, v7.4s\n"
+    "ldr q25, [x17, x28]\n"
+    "fmla v2.4s, v22.4s, v11.4s\n"
+    "ldr q22, [x16, x13]\n"
+    "fmla v4.4s, v26.4s, v9.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "str q16, [x21]\n"
+    "fmla v1.4s, v26.4s, v6.4s\n"
+    "fmla v2.4s, v26.4s, v13.4s\n"
+    "ldr q21, [x15, x14]\n"
+    "fmla v4.4s, v20.4s, v11.4s\n"
+    "ldr q23, [x20, x26]\n"
+    "fmla v15.4s, v27.4s, v7.4s\n"
+    "ldr q19, [x19, x27]\n"
+    "fmla v5.4s, v27.4s, v9.4s\n"
+    "add x15, x15, #16\n"
+    "fmla v4.4s, v27.4s, v13.4s\n"
+    "fmla v3.4s, v27.4s, v6.4s\n"
+    "str q15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v2.4s, v28.4s, v8.4s\n"
+    "fmla v5.4s, v30.4s, v11.4s\n"
+    "ldr q29, [x18, x28]\n"
+    "fmla v1.4s, v17.4s, v12.4s\n"
+    "ldr q27, [x17, x13]\n"
+    "fmla v2.4s, v17.4s, v10.4s\n"
+    "ldr q28, [x16, x14]\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "ldr q26, [x20, x27]\n"
+    "fmla v4.4s, v18.4s, v8.4s\n"
+    "ldr q20, [x19, x28]\n"
+    "fmla v1.4s, v18.4s, v14.4s\n"
+    "ldr q17, [x18, x13]\n"
+    "fmla v3.4s, v25.4s, v12.4s\n"
+    "ldr q18, [x17, x14]\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "ldr q16, [x20, x28]\n"
+    "fmla v5.4s, v22.4s, v8.4s\n"
+    "add x16, x16, #16\n"
+    "fmla v3.4s, v22.4s, v14.4s\n"
+    "ldr q15, [x19, x13]\n"
+    "fmla v2.4s, v23.4s, v7.4s\n"
+    "add x17, x17, #16\n"
+    "fmla v5.4s, v21.4s, v10.4s\n"
+    "ldr q21, [x18, x14]\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "ldr q23, [x20, x13]\n"
+    "str q2, [x22]\n"
+    "fmla v4.4s, v29.4s, v7.4s\n"
+    "fmla v3.4s, v29.4s, v9.4s\n"
+    "ldr q24, [x19, x14]\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "ldr q25, [x20, x14]\n"
+    "str q4, [x21, %[output_col_stride1]]\n"
+    "fmla v0.4s, v29.4s, v6.4s\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "ldr q27, [%[wbptr]]\n"
+    "fmla v1.4s, v29.4s, v13.4s\n"
+    "ldr q29, [%[inptr0]]\n"
+    "fmla v5.4s, v28.4s, v7.4s\n"
+    "ldr q6, [%[wbptr], #16]\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "ldr q28, [x15]\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "ldr q26, [%[inptr0], %[input_col_stride1]]\n"
+    "str q5, [%[outptr0], x23]\n"
+    "fmla v0.4s, v20.4s, v12.4s\n"
+    "fmla v3.4s, v17.4s, v8.4s\n"
+    "ldr q22, [x16]\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "ldr q20, [x15, %[input_col_stride1]]\n"
+    "fmla v0.4s, v17.4s, v14.4s\n"
+    "ldr q12, [%[wbptr], #64]\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "ldr q19, [%[inptr0], x26]\n"
+    "fmla v1.4s, v16.4s, v7.4s\n"
+    "ldr q30, [x17]\n"
+    "fmla v0.4s, v16.4s, v9.4s\n"
+    "ldr q14, [%[wbptr], #32]\n"
+    "fmla v3.4s, v21.4s, v7.4s\n"
+    "ldr q18, [x16, %[input_col_stride1]]\n"
+    "str q1, [x22, %[output_col_stride1]]\n"
+    "mov v17.16b, v27.16b\n"
+    "fmla v0.4s, v15.4s, v11.4s\n"
+    "ldr q9, [%[wbptr], #112]\n"
+    "str q3, [x21, x23]\n"
+    "mov v16.16b, v27.16b\n"
+    "mov v15.16b, v27.16b\n"
+    "add x18, x18, #16\n"
+    "fmla v0.4s, v21.4s, v13.4s\n"
+    "ldr q11, [%[wbptr], #80]\n"
+    "mov v2.16b, v27.16b\n"
+    "add x19, x19, #16\n"
+    "mov v4.16b, v27.16b\n"
+    "add x20, x20, #16\n"
+    "fmla v0.4s, v23.4s, v8.4s\n"
+    "ldr q13, [%[wbptr], #48]\n"
+    "mov v5.16b, v27.16b\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "mov v1.16b, v27.16b\n"
+    "add x21, x21, #16\n"
+    "fmla v0.4s, v24.4s, v10.4s\n"
+    "ldr q8, [%[wbptr], #128]\n"
+    "mov v3.16b, v27.16b\n"
+    "fmla v0.4s, v25.4s, v7.4s\n"
+    "ldr q10, [%[wbptr], #96]\n"
+    "str q0, [x22, x23]\n"
+    "mov v0.16b, v27.16b\n"
+    "ldr q7, [%[wbptr], #144]\n"
+    "add x22, x22, #16\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v17.4s, v29.4s, v6.4s\n"
+    "ldr q21, [x15, x26]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "ldr q27, [%[inptr0], x27]\n"
+    "fmla v15.4s, v19.4s, v6.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v17.4s, v28.4s, v12.4s\n"
+    "ldr q25, [x18]\n"
+    "fmla v16.4s, v30.4s, v12.4s\n"
+    "ldr q24, [x17, %[input_col_stride1]]\n"
+    "fmla v15.4s, v21.4s, v12.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v17.4s, v26.4s, v14.4s\n"
+    "ldr q23, [x16, x26]\n"
+    "fmla v16.4s, v18.4s, v14.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "fmla v15.4s, v27.4s, v14.4s\n"
+    "ldr q26, [x15, x27]\n"
+    "fmla v17.4s, v22.4s, v9.4s\n"
+    "ldr q22, [%[inptr0], x28]\n"
+    "fmla v16.4s, v25.4s, v9.4s\n"
+    "ldr q30, [x19]\n"
+    "fmla v15.4s, v23.4s, v9.4s\n"
+    "fmla v4.4s, v23.4s, v6.4s\n"
+    "fmla v17.4s, v20.4s, v11.4s\n"
+    "ldr q29, [x18, %[input_col_stride1]]\n"
+    "fmla v16.4s, v24.4s, v11.4s\n"
+    "ldr q28, [x17, x26]\n"
+    "fmla v15.4s, v26.4s, v11.4s\n"
+    "ldr q24, [x16, x27]\n"
+    "fmla v17.4s, v19.4s, v13.4s\n"
+    "ldr q25, [x15, x28]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "fmla v5.4s, v22.4s, v6.4s\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "ldr q19, [%[inptr0], x13]\n"
+    "fmla v17.4s, v18.4s, v8.4s\n"
+    "ldr q18, [x20]\n"
+    "fmla v2.4s, v30.4s, v12.4s\n"
+    "ldr q22, [x19, %[input_col_stride1]]\n"
+    "fmla v16.4s, v29.4s, v8.4s\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "fmla v17.4s, v21.4s, v10.4s\n"
+    "ldr q26, [x18, x26]\n"
+    "fmla v2.4s, v29.4s, v14.4s\n"
+    "ldr q20, [x17, x27]\n"
+    "fmla v16.4s, v28.4s, v10.4s\n"
+    "ldr q27, [x16, x28]\n"
+    "fmla v17.4s, v23.4s, v7.4s\n"
+    "ldr q30, [x15, x13]\n"
+    "fmla v15.4s, v24.4s, v8.4s\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "fmla v5.4s, v25.4s, v12.4s\n"
+    "ldr q24, [%[inptr0], x14]\n"
+    "str q17, [%[outptr0]]\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "fmla v15.4s, v25.4s, v10.4s\n"
+    "ldr q28, [x20, %[input_col_stride1]]\n"
+    "fmla v5.4s, v19.4s, v14.4s\n"
+    "ldr q17, [x19, x26]\n"
+    "fmla v2.4s, v22.4s, v11.4s\n"
+    "ldr q18, [x18, x27]\n"
+    "fmla v16.4s, v26.4s, v7.4s\n"
+    "ldr q25, [x17, x28]\n"
+    "fmla v4.4s, v26.4s, v9.4s\n"
+    "ldr q22, [x16, x13]\n"
+    "fmla v2.4s, v26.4s, v13.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "str q16, [x21]\n"
+    "fmla v1.4s, v26.4s, v6.4s\n"
+    "fmla v4.4s, v20.4s, v11.4s\n"
+    "ldr q21, [x15, x14]\n"
+    "fmla v15.4s, v27.4s, v7.4s\n"
+    "ldr q23, [x20, x26]\n"
+    "fmla v5.4s, v27.4s, v9.4s\n"
+    "ldr q19, [x19, x27]\n"
+    "fmla v4.4s, v27.4s, v13.4s\n"
+    "add x15, x15, #16\n"
+    "str q15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v3.4s, v27.4s, v6.4s\n"
+    "fmla v5.4s, v30.4s, v11.4s\n"
+    "ldr q29, [x18, x28]\n"
+    "fmla v2.4s, v28.4s, v8.4s\n"
+    "ldr q27, [x17, x13]\n"
+    "fmla v1.4s, v17.4s, v12.4s\n"
+    "ldr q28, [x16, x14]\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "ldr q26, [x20, x27]\n"
+    "fmla v2.4s, v17.4s, v10.4s\n"
+    "ldr q20, [x19, x28]\n"
+    "fmla v4.4s, v18.4s, v8.4s\n"
+    "ldr q17, [x18, x13]\n"
+    "fmla v1.4s, v18.4s, v14.4s\n"
+    "ldr q18, [x17, x14]\n"
+    "fmla v3.4s, v25.4s, v12.4s\n"
+    "add x16, x16, #16\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "ldr q16, [x20, x28]\n"
+    "fmla v5.4s, v22.4s, v8.4s\n"
+    "add x17, x17, #16\n"
+    "fmla v3.4s, v22.4s, v14.4s\n"
+    "ldr q15, [x19, x13]\n"
+    "fmla v2.4s, v23.4s, v7.4s\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "fmla v5.4s, v21.4s, v10.4s\n"
+    "ldr q21, [x18, x14]\n"
+    "fmla v4.4s, v29.4s, v7.4s\n"
+    "ldr q23, [x20, x13]\n"
+    "str q2, [x22]\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "fmla v3.4s, v29.4s, v9.4s\n"
+    "ldr q24, [x19, x14]\n"
+    "str q4, [x21, %[output_col_stride1]]\n"
+    "fmla v0.4s, v29.4s, v6.4s\n"
+    "fmla v1.4s, v29.4s, v13.4s\n"
+    "ldr q25, [x20, x14]\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "add x18, x18, #16\n"
+    "fmla v5.4s, v28.4s, v7.4s\n"
+    "add x19, x19, #16\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "add x20, x20, #16\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "fmla v0.4s, v20.4s, v12.4s\n"
+    "str q5, [%[outptr0], x23]\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "fmla v3.4s, v17.4s, v8.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v0.4s, v17.4s, v14.4s\n"
+    "fmla v1.4s, v16.4s, v7.4s\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "fmla v0.4s, v16.4s, v9.4s\n"
+    "str q1, [x22, %[output_col_stride1]]\n"
+    "fmla v3.4s, v21.4s, v7.4s\n"
+    "fmla v0.4s, v15.4s, v11.4s\n"
+    "str q3, [x21, x23]\n"
+    "fmla v0.4s, v21.4s, v13.4s\n"
+    "add x21, x21, #16\n"
+    "fmla v0.4s, v23.4s, v8.4s\n"
+    "fmla v0.4s, v24.4s, v10.4s\n"
+    "fmla v0.4s, v25.4s, v7.4s\n"
+    "str q0, [x22, x23]\n"
+    "add x22, x22, #16\n"
+    "4:\n"
+    "cbz x24, 7f\n"
+    "ldr s27, [%[wbptr]]\n"
+    "mov v17.16b, v27.16b\n"
+    "ldr s6, [%[wbptr], #4]\n"
+    "mov v16.16b, v27.16b\n"
+    "ldr s14, [%[wbptr], #8]\n"
+    "mov v15.16b, v27.16b\n"
+    "ldr s13, [%[wbptr], #12]\n"
+    "mov v2.16b, v27.16b\n"
+    "ldr s12, [%[wbptr], #16]\n"
+    "mov v4.16b, v27.16b\n"
+    "ldr s11, [%[wbptr], #20]\n"
+    "mov v5.16b, v27.16b\n"
+    "ldr s10, [%[wbptr], #24]\n"
+    "mov v1.16b, v27.16b\n"
+    "ldr s9, [%[wbptr], #28]\n"
+    "mov v3.16b, v27.16b\n"
+    "ldr s8, [%[wbptr], #32]\n"
+    "mov v0.16b, v27.16b\n"
+    "ldr s7, [%[wbptr], #36]\n"
+    "ldr s29, [%[inptr0]]\n"
+    "subs x24, x24, #1\n"
+    "ldr s28, [x15]\n"
+    "ldr s26, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr s22, [x16]\n"
+    "ldr s20, [x15, %[input_col_stride1]]\n"
+    "ldr s19, [%[inptr0], x26]\n"
+    "ldr s30, [x17]\n"
+    "ldr s18, [x16, %[input_col_stride1]]\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v17.4s, v29.4s, v6.4s\n"
+    "ldr s21, [x15, x26]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "ldr s27, [%[inptr0], x27]\n"
+    "fmla v15.4s, v19.4s, v6.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v17.4s, v28.4s, v12.4s\n"
+    "ldr s25, [x18]\n"
+    "fmla v16.4s, v30.4s, v12.4s\n"
+    "ldr s24, [x17, %[input_col_stride1]]\n"
+    "fmla v15.4s, v21.4s, v12.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v17.4s, v26.4s, v14.4s\n"
+    "ldr s23, [x16, x26]\n"
+    "fmla v16.4s, v18.4s, v14.4s\n"
+    "subs x24, x24, #1\n"
+    "fmla v15.4s, v27.4s, v14.4s\n"
+    "ldr s26, [x15, x27]\n"
+    "fmla v17.4s, v22.4s, v9.4s\n"
+    "ldr s22, [%[inptr0], x28]\n"
+    "fmla v16.4s, v25.4s, v9.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "fmla v15.4s, v23.4s, v9.4s\n"
+    "ldr s30, [x19]\n"
+    "fmla v17.4s, v20.4s, v11.4s\n"
+    "ldr s29, [x18, %[input_col_stride1]]\n"
+    "fmla v16.4s, v24.4s, v11.4s\n"
+    "ldr s28, [x17, x26]\n"
+    "fmla v4.4s, v23.4s, v6.4s\n"
+    "fmla v15.4s, v26.4s, v11.4s\n"
+    "fmla v17.4s, v19.4s, v13.4s\n"
+    "ldr s24, [x16, x27]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "ldr s25, [x15, x28]\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "fmla v5.4s, v22.4s, v6.4s\n"
+    "fmla v17.4s, v18.4s, v8.4s\n"
+    "ldr s19, [%[inptr0], x13]\n"
+    "fmla v2.4s, v30.4s, v12.4s\n"
+    "ldr s18, [x20]\n"
+    "fmla v16.4s, v29.4s, v8.4s\n"
+    "ldr s22, [x19, %[input_col_stride1]]\n"
+    "fmla v17.4s, v21.4s, v10.4s\n"
+    "ldr s26, [x18, x26]\n"
+    "fmla v2.4s, v29.4s, v14.4s\n"
+    "ldr s20, [x17, x27]\n"
+    "fmla v16.4s, v28.4s, v10.4s\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "fmla v17.4s, v23.4s, v7.4s\n"
+    "ldr s27, [x16, x28]\n"
+    "fmla v15.4s, v24.4s, v8.4s\n"
+    "ldr s30, [x15, x13]\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "ldr s24, [%[inptr0], x14]\n"
+    "str s17, [%[outptr0]]\n"
+    "fmla v5.4s, v25.4s, v12.4s\n"
+    "fmla v15.4s, v25.4s, v10.4s\n"
+    "ldr s28, [x20, %[input_col_stride1]]\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "ldr s17, [x19, x26]\n"
+    "fmla v5.4s, v19.4s, v14.4s\n"
+    "ldr s18, [x18, x27]\n"
+    "fmla v16.4s, v26.4s, v7.4s\n"
+    "ldr s25, [x17, x28]\n"
+    "fmla v2.4s, v22.4s, v11.4s\n"
+    "ldr s22, [x16, x13]\n"
+    "fmla v4.4s, v26.4s, v9.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "str s16, [x21]\n"
+    "fmla v1.4s, v26.4s, v6.4s\n"
+    "fmla v2.4s, v26.4s, v13.4s\n"
+    "ldr s21, [x15, x14]\n"
+    "fmla v4.4s, v20.4s, v11.4s\n"
+    "ldr s23, [x20, x26]\n"
+    "fmla v15.4s, v27.4s, v7.4s\n"
+    "ldr s19, [x19, x27]\n"
+    "fmla v5.4s, v27.4s, v9.4s\n"
+    "add x15, x15, #4\n"
+    "fmla v4.4s, v27.4s, v13.4s\n"
+    "fmla v3.4s, v27.4s, v6.4s\n"
+    "str s15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v2.4s, v28.4s, v8.4s\n"
+    "fmla v5.4s, v30.4s, v11.4s\n"
+    "ldr s29, [x18, x28]\n"
+    "fmla v1.4s, v17.4s, v12.4s\n"
+    "ldr s27, [x17, x13]\n"
+    "fmla v2.4s, v17.4s, v10.4s\n"
+    "ldr s28, [x16, x14]\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "ldr s26, [x20, x27]\n"
+    "fmla v4.4s, v18.4s, v8.4s\n"
+    "ldr s20, [x19, x28]\n"
+    "fmla v1.4s, v18.4s, v14.4s\n"
+    "ldr s17, [x18, x13]\n"
+    "fmla v3.4s, v25.4s, v12.4s\n"
+    "ldr s18, [x17, x14]\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "ldr s16, [x20, x28]\n"
+    "fmla v5.4s, v22.4s, v8.4s\n"
+    "add x16, x16, #4\n"
+    "fmla v3.4s, v22.4s, v14.4s\n"
+    "ldr s15, [x19, x13]\n"
+    "fmla v2.4s, v23.4s, v7.4s\n"
+    "add x17, x17, #4\n"
+    "fmla v5.4s, v21.4s, v10.4s\n"
+    "ldr s21, [x18, x14]\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "ldr s23, [x20, x13]\n"
+    "str s2, [x22]\n"
+    "fmla v4.4s, v29.4s, v7.4s\n"
+    "fmla v3.4s, v29.4s, v9.4s\n"
+    "ldr s24, [x19, x14]\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "ldr s25, [x20, x14]\n"
+    "str s4, [x21, %[output_col_stride1]]\n"
+    "fmla v0.4s, v29.4s, v6.4s\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "ldr s27, [%[wbptr]]\n"
+    "fmla v1.4s, v29.4s, v13.4s\n"
+    "ldr s29, [%[inptr0]]\n"
+    "fmla v5.4s, v28.4s, v7.4s\n"
+    "ldr s6, [%[wbptr], #4]\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "ldr s28, [x15]\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "ldr s26, [%[inptr0], %[input_col_stride1]]\n"
+    "str s5, [%[outptr0], x23]\n"
+    "fmla v0.4s, v20.4s, v12.4s\n"
+    "fmla v3.4s, v17.4s, v8.4s\n"
+    "ldr s22, [x16]\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "ldr s20, [x15, %[input_col_stride1]]\n"
+    "fmla v0.4s, v17.4s, v14.4s\n"
+    "ldr s12, [%[wbptr], #16]\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "ldr s19, [%[inptr0], x26]\n"
+    "fmla v1.4s, v16.4s, v7.4s\n"
+    "ldr s30, [x17]\n"
+    "fmla v0.4s, v16.4s, v9.4s\n"
+    "ldr s14, [%[wbptr], #8]\n"
+    "fmla v3.4s, v21.4s, v7.4s\n"
+    "ldr s18, [x16, %[input_col_stride1]]\n"
+    "str s1, [x22, %[output_col_stride1]]\n"
+    "mov v17.16b, v27.16b\n"
+    "fmla v0.4s, v15.4s, v11.4s\n"
+    "ldr s9, [%[wbptr], #28]\n"
+    "str s3, [x21, x23]\n"
+    "mov v16.16b, v27.16b\n"
+    "mov v15.16b, v27.16b\n"
+    "add x18, x18, #4\n"
+    "fmla v0.4s, v21.4s, v13.4s\n"
+    "ldr s11, [%[wbptr], #20]\n"
+    "mov v2.16b, v27.16b\n"
+    "add x19, x19, #4\n"
+    "mov v4.16b, v27.16b\n"
+    "add x20, x20, #4\n"
+    "fmla v0.4s, v23.4s, v8.4s\n"
+    "ldr s13, [%[wbptr], #12]\n"
+    "mov v5.16b, v27.16b\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "mov v1.16b, v27.16b\n"
+    "add x21, x21, #4\n"
+    "fmla v0.4s, v24.4s, v10.4s\n"
+    "ldr s8, [%[wbptr], #32]\n"
+    "mov v3.16b, v27.16b\n"
+    "fmla v0.4s, v25.4s, v7.4s\n"
+    "ldr s10, [%[wbptr], #24]\n"
+    "str s0, [x22, x23]\n"
+    "mov v0.16b, v27.16b\n"
+    "ldr s7, [%[wbptr], #36]\n"
+    "add x22, x22, #4\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v17.4s, v29.4s, v6.4s\n"
+    "ldr s21, [x15, x26]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "ldr s27, [%[inptr0], x27]\n"
+    "fmla v15.4s, v19.4s, v6.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v17.4s, v28.4s, v12.4s\n"
+    "ldr s25, [x18]\n"
+    "fmla v16.4s, v30.4s, v12.4s\n"
+    "ldr s24, [x17, %[input_col_stride1]]\n"
+    "fmla v15.4s, v21.4s, v12.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v17.4s, v26.4s, v14.4s\n"
+    "ldr s23, [x16, x26]\n"
+    "fmla v16.4s, v18.4s, v14.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "fmla v15.4s, v27.4s, v14.4s\n"
+    "ldr s26, [x15, x27]\n"
+    "fmla v17.4s, v22.4s, v9.4s\n"
+    "ldr s22, [%[inptr0], x28]\n"
+    "fmla v16.4s, v25.4s, v9.4s\n"
+    "ldr s30, [x19]\n"
+    "fmla v15.4s, v23.4s, v9.4s\n"
+    "fmla v4.4s, v23.4s, v6.4s\n"
+    "fmla v17.4s, v20.4s, v11.4s\n"
+    "ldr s29, [x18, %[input_col_stride1]]\n"
+    "fmla v16.4s, v24.4s, v11.4s\n"
+    "ldr s28, [x17, x26]\n"
+    "fmla v15.4s, v26.4s, v11.4s\n"
+    "ldr s24, [x16, x27]\n"
+    "fmla v17.4s, v19.4s, v13.4s\n"
+    "ldr s25, [x15, x28]\n"
+    "fmla v16.4s, v23.4s, v13.4s\n"
+    "fmla v5.4s, v22.4s, v6.4s\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "ldr s19, [%[inptr0], x13]\n"
+    "fmla v17.4s, v18.4s, v8.4s\n"
+    "ldr s18, [x20]\n"
+    "fmla v2.4s, v30.4s, v12.4s\n"
+    "ldr s22, [x19, %[input_col_stride1]]\n"
+    "fmla v16.4s, v29.4s, v8.4s\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "fmla v17.4s, v21.4s, v10.4s\n"
+    "ldr s26, [x18, x26]\n"
+    "fmla v2.4s, v29.4s, v14.4s\n"
+    "ldr s20, [x17, x27]\n"
+    "fmla v16.4s, v28.4s, v10.4s\n"
+    "ldr s27, [x16, x28]\n"
+    "fmla v17.4s, v23.4s, v7.4s\n"
+    "ldr s30, [x15, x13]\n"
+    "fmla v15.4s, v24.4s, v8.4s\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "fmla v5.4s, v25.4s, v12.4s\n"
+    "ldr s24, [%[inptr0], x14]\n"
+    "str s17, [%[outptr0]]\n"
+    "fmla v2.4s, v18.4s, v9.4s\n"
+    "fmla v15.4s, v25.4s, v10.4s\n"
+    "ldr s28, [x20, %[input_col_stride1]]\n"
+    "fmla v5.4s, v19.4s, v14.4s\n"
+    "ldr s17, [x19, x26]\n"
+    "fmla v2.4s, v22.4s, v11.4s\n"
+    "ldr s18, [x18, x27]\n"
+    "fmla v16.4s, v26.4s, v7.4s\n"
+    "ldr s25, [x17, x28]\n"
+    "fmla v4.4s, v26.4s, v9.4s\n"
+    "ldr s22, [x16, x13]\n"
+    "fmla v2.4s, v26.4s, v13.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "str s16, [x21]\n"
+    "fmla v1.4s, v26.4s, v6.4s\n"
+    "fmla v4.4s, v20.4s, v11.4s\n"
+    "ldr s21, [x15, x14]\n"
+    "fmla v15.4s, v27.4s, v7.4s\n"
+    "ldr s23, [x20, x26]\n"
+    "fmla v5.4s, v27.4s, v9.4s\n"
+    "ldr s19, [x19, x27]\n"
+    "fmla v4.4s, v27.4s, v13.4s\n"
+    "add x15, x15, #4\n"
+    "str s15, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v3.4s, v27.4s, v6.4s\n"
+    "fmla v5.4s, v30.4s, v11.4s\n"
+    "ldr s29, [x18, x28]\n"
+    "fmla v2.4s, v28.4s, v8.4s\n"
+    "ldr s27, [x17, x13]\n"
+    "fmla v1.4s, v17.4s, v12.4s\n"
+    "ldr s28, [x16, x14]\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "ldr s26, [x20, x27]\n"
+    "fmla v2.4s, v17.4s, v10.4s\n"
+    "ldr s20, [x19, x28]\n"
+    "fmla v4.4s, v18.4s, v8.4s\n"
+    "ldr s17, [x18, x13]\n"
+    "fmla v1.4s, v18.4s, v14.4s\n"
+    "ldr s18, [x17, x14]\n"
+    "fmla v3.4s, v25.4s, v12.4s\n"
+    "add x16, x16, #4\n"
+    "fmla v4.4s, v25.4s, v10.4s\n"
+    "ldr s16, [x20, x28]\n"
+    "fmla v5.4s, v22.4s, v8.4s\n"
+    "add x17, x17, #4\n"
+    "fmla v3.4s, v22.4s, v14.4s\n"
+    "ldr s15, [x19, x13]\n"
+    "fmla v2.4s, v23.4s, v7.4s\n"
+    "fmla v1.4s, v23.4s, v9.4s\n"
+    "fmla v5.4s, v21.4s, v10.4s\n"
+    "ldr s21, [x18, x14]\n"
+    "fmla v4.4s, v29.4s, v7.4s\n"
+    "ldr s23, [x20, x13]\n"
+    "str s2, [x22]\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "fmla v3.4s, v29.4s, v9.4s\n"
+    "ldr s24, [x19, x14]\n"
+    "str s4, [x21, %[output_col_stride1]]\n"
+    "fmla v0.4s, v29.4s, v6.4s\n"
+    "fmla v1.4s, v29.4s, v13.4s\n"
+    "ldr s25, [x20, x14]\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "add x18, x18, #4\n"
+    "fmla v5.4s, v28.4s, v7.4s\n"
+    "add x19, x19, #4\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "add x20, x20, #4\n"
+    "fmla v3.4s, v28.4s, v13.4s\n"
+    "fmla v0.4s, v20.4s, v12.4s\n"
+    "str s5, [%[outptr0], x23]\n"
+    "fmla v1.4s, v20.4s, v10.4s\n"
+    "fmla v3.4s, v17.4s, v8.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v0.4s, v17.4s, v14.4s\n"
+    "fmla v1.4s, v16.4s, v7.4s\n"
+    "fmla v3.4s, v18.4s, v10.4s\n"
+    "fmla v0.4s, v16.4s, v9.4s\n"
+    "str s1, [x22, %[output_col_stride1]]\n"
+    "fmla v3.4s, v21.4s, v7.4s\n"
+    "fmla v0.4s, v15.4s, v11.4s\n"
+    "str s3, [x21, x23]\n"
+    "fmla v0.4s, v21.4s, v13.4s\n"
+    "add x21, x21, #4\n"
+    "fmla v0.4s, v23.4s, v8.4s\n"
+    "fmla v0.4s, v24.4s, v10.4s\n"
+    "fmla v0.4s, v25.4s, v7.4s\n"
+    "str s0, [x22, x23]\n"
+    "add x22, x22, #4\n"
+    "7:\n"
+    : [wbptr] "+r" (weight_bias_ptr), [inptr0] "+r" (input), [outptr0] "+r" (output)
+    : [n_channels] "r" ((long long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x13", "x14", "memory"
+  );
 }
-
 #endif  // __aarch64__
 
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
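+// 3x3 output tile, 3x3 kernel, 2x2 stride; fp32 input, bias and output.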
+template class DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>;
 
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-  ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-  ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-  ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-  ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 2, 0>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 2>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float>;
 }  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp16_fp16.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp16_fp16.cpp
deleted file mode 100644
index 33b55df..0000000
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp16_fp16.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "impl_fp16_fp16.hpp"
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-namespace depthwise
-{
-using Conv = DepthwiseConvolution<4, 4, 3, 3, 1, 1, float16_t, float16_t>;
-using ConvImpl = DepthwiseConvolutionImpl<4, 4, 3, 3, 1, 1, float16_t, float16_t>;
-
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
-
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-        ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-        ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 3, 0>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 3>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<4, 4, 3, 3, 1, 1, float16_t, float16_t>;
-}  // namespace depthwise
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp
index c36c24e..ff0e454 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,1468 +25,3628 @@
 
 namespace depthwise
 {
-using Conv = DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<4, 4, 3, 3, 1, 1, float, float>;
+
+using namespace neon_convolution_kernels;
+using Conv = DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>;
 
 #ifdef __aarch64__
-
 template <>
 template <>
-void ConvImpl::process_tile<true, 0, 0, 0, 0, 0, 0>(
-  const int n_channels,
-  const float* const weights,
-  const int weight_row_stride,
-  const int weight_col_stride,
-  const float* const inptr,
-  const int in_row_stride,
-  const int in_col_stride,
-  float* const outptr,
-  const int out_row_stride,
-  const int out_col_stride,
-  const int, const int, const int, const int, const int, const int, const int, const int
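+// Tile executor specialisation: fp32, 4x4 output tile, 3x3 kernel,
+// unit stride, with no fused activation.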
+void Conv::execute_tile<ActivationFunction::None>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
 )
 {
-  constexpr auto inner_tile_rows = DWC::inner_tile_rows;
-  constexpr auto inner_tile_cols = DWC::inner_tile_cols;
-  constexpr auto kernel_rows = DWC::kernel_rows;
-  constexpr auto kernel_cols = DWC::kernel_cols;
-  constexpr auto output_tile_rows = DWC::output_tile_rows;
-  constexpr auto output_tile_cols = DWC::output_tile_cols;
-  constexpr auto stride_rows = DWC::stride_rows;
-  constexpr auto stride_cols = DWC::stride_cols;
-
-  // Extract parameters
-  const int in_pad_top = 0;
-  const int in_pad_left = 0;
-  const int in_pad_bottom = 0;
-  const int in_pad_right = 0;
-  const int out_pad_bottom = 0;
-  const int out_pad_right = 0;
-
-  // Compute valid ranges of the tile
-  const int in_cells_i = inner_tile_rows - in_pad_bottom;
-  const int in_cells_j = inner_tile_cols - in_pad_right;
-  const int out_cells_i = output_tile_rows - out_pad_bottom;
-  const int out_cells_j = output_tile_cols - out_pad_right;
-
-  // Copy pointers
-  const float *uptr0 = inptr;
-  const float *wptr0 = weights;
-  float *vptr0 = outptr;
-  const bool same_strides = (
-    weight_col_stride == in_col_stride &&
-    weight_col_stride == out_col_stride
+  __asm __volatile(
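+    // x8-x12: input row pointers; x15/x17/x19/x21: input column offsets,
+    // each with a +64 byte twin (x16/x18/x20/x22) used as a prefetch
+    // address; x23-x25: output row pointers; x26/x27: output column offsets.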
+    "add x8, %[inptr0], %[input_row_stride]\n"
+    "add x15, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x23, %[outptr0], %[output_row_stride]\n"
+    "add x9, x8, %[input_row_stride]\n"
+    "add x16, x15, #64\n"
+    "add x17, x15, %[input_col_stride1]\n"
+    "add x10, x9, %[input_row_stride]\n"
+    "add x18, x17, #64\n"
+    "add x19, x17, %[input_col_stride1]\n"
+    "add x11, x10, %[input_row_stride]\n"
+    "add x20, x19, #64\n"
+    "add x21, x19, %[input_col_stride1]\n"
+    "add x12, x11, %[input_row_stride]\n"
+    "add x22, x21, #64\n"
+    "add x24, x23, %[output_row_stride]\n"
+    "add x25, x24, %[output_row_stride]\n"
+    "add x26, %[output_col_stride1], %[output_col_stride1]\n"
+    "and x13, %[n_channels], #3\n"
+    "add x27, x26, %[output_col_stride1]\n"
+    "lsr x14, %[n_channels], #2\n"
+    "cbz x14, 4f\n"
+    "1:\n"
+    "ldr q14, [%[wbptr]]\n"
+    "subs x14, x14, #1\n"
+    "mov v17.16b, v14.16b\n"
+    "ldr q12, [%[wbptr], #16]\n"
+    "mov v23.16b, v14.16b\n"
+    "ldr q11, [%[wbptr], #32]\n"
+    "mov v24.16b, v14.16b\n"
+    "ldr q10, [%[wbptr], #48]\n"
+    "mov v20.16b, v14.16b\n"
+    "ldr q9, [%[wbptr], #64]\n"
+    "mov v16.16b, v14.16b\n"
+    "ldr q8, [%[wbptr], #80]\n"
+    "mov v13.16b, v14.16b\n"
+    "ldr q7, [%[wbptr], #96]\n"
+    "mov v0.16b, v14.16b\n"
+    "ldr q6, [%[wbptr], #112]\n"
+    "mov v1.16b, v14.16b\n"
+    "ldr q5, [%[wbptr], #128]\n"
+    "mov v2.16b, v14.16b\n"
+    "ldr q4, [%[wbptr], #144]\n"
+    "mov v3.16b, v14.16b\n"
+    "ldr q29, [%[inptr0]]\n"
+    "fmla v17.4s, v29.4s, v12.4s\n"
+    "ldr q28, [x8]\n"
+    "ldr q30, [%[inptr0], %[input_col_stride1]]\n"
+    "ldr q25, [x9]\n"
+    "ldr q26, [x8, %[input_col_stride1]]\n"
+    "ldr q27, [%[inptr0], x15]\n"
+    "ldr q15, [x10]\n"
+    "ldr q18, [x9, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x8, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "prfm pldl1keep, [x8, x28]\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "prfm pldl1keep, [x9, x28]\n"
+    "beq 3f\n"
+    "2:\n"
+    "fmla v17.4s, v28.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x16]\n"
+    "fmla v23.4s, v28.4s, v12.4s\n"
+    "ldr q22, [x8, x15]\n"
+    "fmla v24.4s, v30.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v17.4s, v30.4s, v11.4s\n"
+    "ldr q29, [%[inptr0], x17]\n"
+    "fmla v23.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x11, #64]\n"
+    "fmla v20.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x10, x28]\n"
+    "fmla v17.4s, v25.4s, v6.4s\n"
+    "ldr q25, [x11]\n"
+    "fmla v23.4s, v26.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x18]\n"
+    "fmla v17.4s, v26.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v16.4s, v26.4s, v12.4s\n"
+    "ldr q28, [x10, %[input_col_stride1]]\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, #64]\n"
+    "fmla v17.4s, v27.4s, v10.4s\n"
+    "prfm pldl1keep, [x11, x28]\n"
+    "fmla v13.4s, v27.4s, v12.4s\n"
+    "ldr q19, [x9, x15]\n"
+    "fmla v23.4s, v15.4s, v6.4s\n"
+    "prfm pldl1keep, [x10, x16]\n"
+    "fmla v20.4s, v15.4s, v9.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v0.4s, v15.4s, v12.4s\n"
+    "ldr q21, [x8, x17]\n"
+    "fmla v17.4s, v18.4s, v5.4s\n"
+    "prfm pldl1keep, [x8, x20]\n"
+    "fmla v23.4s, v18.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "fmla v24.4s, v18.4s, v6.4s\n"
+    "prfm pldl1keep, [x12, x28]\n"
+    "fmla v20.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x16]\n"
+    "fmla v16.4s, v18.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x18]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "ldr q27, [%[inptr0], x19]\n"
+    "fmla v17.4s, v22.4s, v7.4s\n"
+    "prfm pldl1keep, [x9, x20]\n"
+    "fmla v23.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x8, x22]\n"
+    "fmla v24.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x16]\n"
+    "fmla v16.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x18]\n"
+    "fmla v13.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x20]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "ldr q18, [x12]\n"
+    "fmla v24.4s, v29.4s, v10.4s\n"
+    "prfm pldl1keep, [x9, x22]\n"
+    "fmla v13.4s, v29.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x18]\n"
+    "fmla v3.4s, v29.4s, v12.4s\n"
+    "ldr q22, [x11, %[input_col_stride1]]\n"
+    "fmla v20.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x20]\n"
+    "fmla v0.4s, v25.4s, v9.4s\n"
+    "ldr q25, [x10, x15]\n"
+    "fmla v23.4s, v28.4s, v5.4s\n"
+    "prfm pldl1keep, [x10, x22]\n"
+    "fmla v20.4s, v28.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x20]\n"
+    "fmla v16.4s, v28.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x22]\n"
+    "fmla v0.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x22]\n"
+    "fmla v1.4s, v28.4s, v9.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v17.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v23.4s, v19.4s, v7.4s\n"
+    "subs x14, x14, #1\n"
+    "fmla v24.4s, v19.4s, v5.4s\n"
+    "fmla v20.4s, v19.4s, v10.4s\n"
+    "str q17, [%[outptr0]]\n"
+    "mov v15.16b, v14.16b\n"
+    "fmla v16.4s, v19.4s, v8.4s\n"
+    "fmla v13.4s, v19.4s, v6.4s\n"
+    "fmla v15.4s, v28.4s, v12.4s\n"
+    "ldr q29, [x9, x17]\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "fmla v2.4s, v19.4s, v9.4s\n"
+    "fmla v24.4s, v21.4s, v7.4s\n"
+    "fmla v16.4s, v21.4s, v10.4s\n"
+    "fmla v13.4s, v21.4s, v8.4s\n"
+    "fmla v3.4s, v21.4s, v9.4s\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "fmla v0.4s, v18.4s, v6.4s\n"
+    "mov v18.16b, v14.16b\n"
+    "fmla v20.4s, v22.4s, v5.4s\n"
+    "fmla v13.4s, v27.4s, v10.4s\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "mov v17.16b, v14.16b\n"
+    "fmla v18.4s, v19.4s, v12.4s\n"
+    "mov v19.16b, v14.16b\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "fmla v17.4s, v21.4s, v12.4s\n"
+    "ldr q26, [x8, x19]\n"
+    "fmla v1.4s, v22.4s, v6.4s\n"
+    "fmla v15.4s, v22.4s, v9.4s\n"
+    "mov v22.16b, v14.16b\n"
+    "mov v21.16b, v14.16b\n"
+    "fmla v23.4s, v25.4s, v4.4s\n"
+    "fmla v20.4s, v25.4s, v7.4s\n"
+    "fmla v16.4s, v25.4s, v5.4s\n"
+    "fmla v0.4s, v25.4s, v10.4s\n"
+    "fmla v1.4s, v25.4s, v8.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "str q23, [x23]\n"
+    "fmla v15.4s, v25.4s, v11.4s\n"
+    "fmla v18.4s, v25.4s, v9.4s\n"
+    "ldr q28, [%[inptr0], x21]\n"
+    "fmla v19.4s, v25.4s, v12.4s\n"
+    "ldr q30, [x12, %[input_col_stride1]]\n"
+    "fmla v24.4s, v29.4s, v4.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v16.4s, v29.4s, v7.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "str q24, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v1.4s, v29.4s, v10.4s\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "ldr q27, [x11, x15]\n"
+    "fmla v3.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v18.4s, v29.4s, v11.4s\n"
+    "fmla v17.4s, v29.4s, v9.4s\n"
+    "fmla v22.4s, v29.4s, v12.4s\n"
+    "ldr q23, [x10, x17]\n"
+    "fmla v13.4s, v26.4s, v7.4s\n"
+    "fmla v2.4s, v26.4s, v10.4s\n"
+    "fmla v3.4s, v26.4s, v8.4s\n"
+    "fmla v17.4s, v26.4s, v11.4s\n"
+    "fmla v0.4s, v30.4s, v5.4s\n"
+    "ldr q24, [x9, x19]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "ldr q29, [x8, x21]\n"
+    "fmla v3.4s, v28.4s, v10.4s\n"
+    "ldr q14, [x12, x15]\n"
+    "fmla v20.4s, v27.4s, v4.4s\n"
+    "add x8, x8, #16\n"
+    "fmla v0.4s, v27.4s, v7.4s\n"
+    "prfm pldl1keep, [x8, #64]\n"
+    "fmla v1.4s, v27.4s, v5.4s\n"
+    "prfm pldl1keep, [x8, x28]\n"
+    "str q20, [x24]\n"
+    "fmla v15.4s, v27.4s, v8.4s\n"
+    "fmla v18.4s, v27.4s, v6.4s\n"
+    "ldr q25, [x11, x17]\n"
+    "fmla v19.4s, v27.4s, v9.4s\n"
+    "ldr q30, [x10, x19]\n"
+    "fmla v16.4s, v23.4s, v4.4s\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "fmla v2.4s, v23.4s, v5.4s\n"
+    "fmla v15.4s, v23.4s, v10.4s\n"
+    "fmla v18.4s, v23.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v6.4s\n"
+    "str q16, [x23, %[output_col_stride1]]\n"
+    "fmla v19.4s, v23.4s, v11.4s\n"
+    "fmla v22.4s, v23.4s, v9.4s\n"
+    "ldr q26, [x9, x21]\n"
+    "fmla v21.4s, v23.4s, v12.4s\n"
+    "ldr q27, [x12, x17]\n"
+    "fmla v13.4s, v24.4s, v4.4s\n"
+    "ldr q20, [x11, x19]\n"
+    "fmla v2.4s, v24.4s, v7.4s\n"
+    "add x9, x9, #16\n"
+    "fmla v3.4s, v24.4s, v5.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "str q13, [%[outptr0], x26]\n"
+    "fmla v18.4s, v24.4s, v10.4s\n"
+    "fmla v17.4s, v24.4s, v8.4s\n"
+    "ldr q23, [x10, x21]\n"
+    "fmla v22.4s, v24.4s, v11.4s\n"
+    "ldr q24, [x12, x19]\n"
+    "fmla v3.4s, v29.4s, v7.4s\n"
+    "prfm pldl1keep, [x9, x28]\n"
+    "fmla v17.4s, v29.4s, v10.4s\n"
+    "ldr q16, [x11, x21]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "add x10, x10, #16\n"
+    "fmla v15.4s, v14.4s, v5.4s\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "fmla v19.4s, v14.4s, v6.4s\n"
+    "ldr q13, [x12, x21]\n"
+    "str q0, [x25]\n"
+    "fmla v1.4s, v25.4s, v4.4s\n"
+    "fmla v15.4s, v25.4s, v7.4s\n"
+    "ldr q14, [%[wbptr]]\n"
+    "fmla v18.4s, v25.4s, v5.4s\n"
+    "add x11, x11, #16\n"
+    "str q1, [x24, %[output_col_stride1]]\n"
+    "fmla v19.4s, v25.4s, v8.4s\n"
+    "fmla v22.4s, v25.4s, v6.4s\n"
+    "ldr q12, [%[wbptr], #16]\n"
+    "fmla v21.4s, v25.4s, v9.4s\n"
+    "ldr q29, [%[inptr0]]\n"
+    "fmla v2.4s, v30.4s, v4.4s\n"
+    "ldr q28, [x8]\n"
+    "fmla v18.4s, v30.4s, v7.4s\n"
+    "add x12, x12, #16\n"
+    "fmla v17.4s, v30.4s, v5.4s\n"
+    "fmla v19.4s, v30.4s, v10.4s\n"
+    "str q2, [x23, x26]\n"
+    "fmla v22.4s, v30.4s, v8.4s\n"
+    "fmla v21.4s, v30.4s, v11.4s\n"
+    "ldr q9, [%[wbptr], #64]\n"
+    "fmla v3.4s, v26.4s, v4.4s\n"
+    "ldr q30, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v17.4s, v26.4s, v7.4s\n"
+    "ldr q25, [x9]\n"
+    "fmla v22.4s, v26.4s, v10.4s\n"
+    "ldr q11, [%[wbptr], #32]\n"
+    "str q3, [%[outptr0], x27]\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v19.4s, v27.4s, v5.4s\n"
+    "ldr q26, [x8, %[input_col_stride1]]\n"
+    "fmla v21.4s, v27.4s, v6.4s\n"
+    "ldr q27, [%[inptr0], x15]\n"
+    "str q15, [x25, %[output_col_stride1]]\n"
+    "fmla v18.4s, v20.4s, v4.4s\n"
+    "fmla v19.4s, v20.4s, v7.4s\n"
+    "ldr q15, [x10]\n"
+    "fmla v22.4s, v20.4s, v5.4s\n"
+    "ldr q6, [%[wbptr], #112]\n"
+    "str q18, [x24, x26]\n"
+    "fmla v21.4s, v20.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "ldr q18, [x9, %[input_col_stride1]]\n"
+    "fmla v22.4s, v23.4s, v7.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v21.4s, v23.4s, v10.4s\n"
+    "ldr q8, [%[wbptr], #80]\n"
+    "str q17, [x23, x27]\n"
+    "fmla v19.4s, v24.4s, v4.4s\n"
+    "fmla v22.4s, v16.4s, v4.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v21.4s, v24.4s, v5.4s\n"
+    "ldr q10, [%[wbptr], #48]\n"
+    "str q19, [x25, x26]\n"
+    "mov v17.16b, v14.16b\n"
+    "str q22, [x24, x27]\n"
+    "mov v23.16b, v14.16b\n"
+    "fmla v21.4s, v16.4s, v7.4s\n"
+    "ldr q5, [%[wbptr], #128]\n"
+    "mov v24.16b, v14.16b\n"
+    "add x24, x24, #16\n"
+    "mov v20.16b, v14.16b\n"
+    "mov v16.16b, v14.16b\n"
+    "fmla v21.4s, v13.4s, v4.4s\n"
+    "ldr q7, [%[wbptr], #96]\n"
+    "mov v13.16b, v14.16b\n"
+    "mov v0.16b, v14.16b\n"
+    "mov v1.16b, v14.16b\n"
+    "mov v2.16b, v14.16b\n"
+    "str q21, [x25, x27]\n"
+    "mov v3.16b, v14.16b\n"
+    "ldr q4, [%[wbptr], #144]\n"
+    "add x25, x25, #16\n"
+    "fmla v17.4s, v29.4s, v12.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "fmla v17.4s, v28.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x16]\n"
+    "fmla v23.4s, v28.4s, v12.4s\n"
+    "ldr q22, [x8, x15]\n"
+    "fmla v24.4s, v30.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v17.4s, v30.4s, v11.4s\n"
+    "ldr q29, [%[inptr0], x17]\n"
+    "fmla v23.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x11, #64]\n"
+    "fmla v20.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x10, x28]\n"
+    "fmla v17.4s, v25.4s, v6.4s\n"
+    "ldr q25, [x11]\n"
+    "fmla v23.4s, v26.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x18]\n"
+    "fmla v17.4s, v26.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v16.4s, v26.4s, v12.4s\n"
+    "ldr q28, [x10, %[input_col_stride1]]\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, #64]\n"
+    "fmla v17.4s, v27.4s, v10.4s\n"
+    "prfm pldl1keep, [x11, x28]\n"
+    "fmla v13.4s, v27.4s, v12.4s\n"
+    "ldr q19, [x9, x15]\n"
+    "fmla v23.4s, v15.4s, v6.4s\n"
+    "prfm pldl1keep, [x10, x16]\n"
+    "fmla v20.4s, v15.4s, v9.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v0.4s, v15.4s, v12.4s\n"
+    "ldr q21, [x8, x17]\n"
+    "fmla v17.4s, v18.4s, v5.4s\n"
+    "prfm pldl1keep, [x8, x20]\n"
+    "fmla v23.4s, v18.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "fmla v24.4s, v18.4s, v6.4s\n"
+    "prfm pldl1keep, [x12, x28]\n"
+    "fmla v20.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x16]\n"
+    "fmla v16.4s, v18.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x18]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "ldr q27, [%[inptr0], x19]\n"
+    "fmla v17.4s, v22.4s, v7.4s\n"
+    "prfm pldl1keep, [x9, x20]\n"
+    "fmla v23.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x8, x22]\n"
+    "fmla v24.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x16]\n"
+    "fmla v16.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x18]\n"
+    "fmla v13.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x20]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "ldr q18, [x12]\n"
+    "fmla v24.4s, v29.4s, v10.4s\n"
+    "prfm pldl1keep, [x9, x22]\n"
+    "fmla v13.4s, v29.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x18]\n"
+    "fmla v3.4s, v29.4s, v12.4s\n"
+    "ldr q22, [x11, %[input_col_stride1]]\n"
+    "fmla v20.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x20]\n"
+    "fmla v0.4s, v25.4s, v9.4s\n"
+    "ldr q25, [x10, x15]\n"
+    "fmla v23.4s, v28.4s, v5.4s\n"
+    "prfm pldl1keep, [x10, x22]\n"
+    "fmla v20.4s, v28.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x20]\n"
+    "fmla v16.4s, v28.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x22]\n"
+    "fmla v0.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x22]\n"
+    "fmla v1.4s, v28.4s, v9.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v17.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v23.4s, v19.4s, v7.4s\n"
+    "fmla v24.4s, v19.4s, v5.4s\n"
+    "fmla v20.4s, v19.4s, v10.4s\n"
+    "fmla v16.4s, v19.4s, v8.4s\n"
+    "str q17, [%[outptr0]]\n"
+    "mov v15.16b, v14.16b\n"
+    "fmla v13.4s, v19.4s, v6.4s\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "fmla v15.4s, v28.4s, v12.4s\n"
+    "ldr q29, [x9, x17]\n"
+    "fmla v2.4s, v19.4s, v9.4s\n"
+    "fmla v24.4s, v21.4s, v7.4s\n"
+    "fmla v16.4s, v21.4s, v10.4s\n"
+    "fmla v13.4s, v21.4s, v8.4s\n"
+    "fmla v3.4s, v21.4s, v9.4s\n"
+    "fmla v0.4s, v18.4s, v6.4s\n"
+    "mov v18.16b, v14.16b\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "fmla v13.4s, v27.4s, v10.4s\n"
+    "fmla v20.4s, v22.4s, v5.4s\n"
+    "fmla v18.4s, v19.4s, v12.4s\n"
+    "ldr q26, [x8, x19]\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "ldr q28, [%[inptr0], x21]\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v1.4s, v22.4s, v6.4s\n"
+    "fmla v15.4s, v22.4s, v9.4s\n"
+    "mov v17.16b, v14.16b\n"
+    "fmla v23.4s, v25.4s, v4.4s\n"
+    "fmla v20.4s, v25.4s, v7.4s\n"
+    "fmla v16.4s, v25.4s, v5.4s\n"
+    "fmla v17.4s, v21.4s, v12.4s\n"
+    "ldr q30, [x12, %[input_col_stride1]]\n"
+    "str q23, [x23]\n"
+    "mov v19.16b, v14.16b\n"
+    "fmla v0.4s, v25.4s, v10.4s\n"
+    "fmla v1.4s, v25.4s, v8.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "fmla v15.4s, v25.4s, v11.4s\n"
+    "fmla v18.4s, v25.4s, v9.4s\n"
+    "fmla v19.4s, v25.4s, v12.4s\n"
+    "mov v22.16b, v14.16b\n"
+    "mov v21.16b, v14.16b\n"
+    "fmla v24.4s, v29.4s, v4.4s\n"
+    "fmla v16.4s, v29.4s, v7.4s\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "fmla v1.4s, v29.4s, v10.4s\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "fmla v3.4s, v29.4s, v6.4s\n"
+    "str q24, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v18.4s, v29.4s, v11.4s\n"
+    "fmla v17.4s, v29.4s, v9.4s\n"
+    "ldr q27, [x11, x15]\n"
+    "fmla v22.4s, v29.4s, v12.4s\n"
+    "ldr q23, [x10, x17]\n"
+    "fmla v13.4s, v26.4s, v7.4s\n"
+    "fmla v2.4s, v26.4s, v10.4s\n"
+    "fmla v3.4s, v26.4s, v8.4s\n"
+    "fmla v17.4s, v26.4s, v11.4s\n"
+    "fmla v0.4s, v30.4s, v5.4s\n"
+    "ldr q24, [x9, x19]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "ldr q29, [x8, x21]\n"
+    "fmla v3.4s, v28.4s, v10.4s\n"
+    "ldr q14, [x12, x15]\n"
+    "fmla v20.4s, v27.4s, v4.4s\n"
+    "add x8, x8, #16\n"
+    "fmla v0.4s, v27.4s, v7.4s\n"
+    "fmla v1.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v27.4s, v8.4s\n"
+    "fmla v18.4s, v27.4s, v6.4s\n"
+    "str q20, [x24]\n"
+    "fmla v19.4s, v27.4s, v9.4s\n"
+    "fmla v16.4s, v23.4s, v4.4s\n"
+    "ldr q25, [x11, x17]\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "ldr q30, [x10, x19]\n"
+    "fmla v2.4s, v23.4s, v5.4s\n"
+    "fmla v15.4s, v23.4s, v10.4s\n"
+    "str q16, [x23, %[output_col_stride1]]\n"
+    "fmla v18.4s, v23.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v6.4s\n"
+    "ldr q26, [x9, x21]\n"
+    "fmla v19.4s, v23.4s, v11.4s\n"
+    "add x9, x9, #16\n"
+    "fmla v22.4s, v23.4s, v9.4s\n"
+    "fmla v21.4s, v23.4s, v12.4s\n"
+    "fmla v13.4s, v24.4s, v4.4s\n"
+    "ldr q27, [x12, x17]\n"
+    "fmla v2.4s, v24.4s, v7.4s\n"
+    "ldr q20, [x11, x19]\n"
+    "fmla v3.4s, v24.4s, v5.4s\n"
+    "fmla v18.4s, v24.4s, v10.4s\n"
+    "str q13, [%[outptr0], x26]\n"
+    "fmla v17.4s, v24.4s, v8.4s\n"
+    "fmla v22.4s, v24.4s, v11.4s\n"
+    "ldr q23, [x10, x21]\n"
+    "fmla v3.4s, v29.4s, v7.4s\n"
+    "ldr q24, [x12, x19]\n"
+    "fmla v17.4s, v29.4s, v10.4s\n"
+    "ldr q16, [x11, x21]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "add x10, x10, #16\n"
+    "fmla v15.4s, v14.4s, v5.4s\n"
+    "add x11, x11, #16\n"
+    "fmla v19.4s, v14.4s, v6.4s\n"
+    "ldr q13, [x12, x21]\n"
+    "str q0, [x25]\n"
+    "fmla v1.4s, v25.4s, v4.4s\n"
+    "fmla v15.4s, v25.4s, v7.4s\n"
+    "add x12, x12, #16\n"
+    "fmla v18.4s, v25.4s, v5.4s\n"
+    "fmla v19.4s, v25.4s, v8.4s\n"
+    "str q1, [x24, %[output_col_stride1]]\n"
+    "fmla v22.4s, v25.4s, v6.4s\n"
+    "fmla v21.4s, v25.4s, v9.4s\n"
+    "fmla v2.4s, v30.4s, v4.4s\n"
+    "fmla v18.4s, v30.4s, v7.4s\n"
+    "fmla v17.4s, v30.4s, v5.4s\n"
+    "fmla v19.4s, v30.4s, v10.4s\n"
+    "fmla v22.4s, v30.4s, v8.4s\n"
+    "str q2, [x23, x26]\n"
+    "fmla v21.4s, v30.4s, v11.4s\n"
+    "fmla v3.4s, v26.4s, v4.4s\n"
+    "fmla v17.4s, v26.4s, v7.4s\n"
+    "fmla v22.4s, v26.4s, v10.4s\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v19.4s, v27.4s, v5.4s\n"
+    "fmla v21.4s, v27.4s, v6.4s\n"
+    "str q3, [%[outptr0], x27]\n"
+    "fmla v18.4s, v20.4s, v4.4s\n"
+    "str q15, [x25, %[output_col_stride1]]\n"
+    "fmla v22.4s, v20.4s, v5.4s\n"
+    "fmla v19.4s, v20.4s, v7.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "str q18, [x24, x26]\n"
+    "fmla v21.4s, v20.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "fmla v22.4s, v23.4s, v7.4s\n"
+    "fmla v19.4s, v24.4s, v4.4s\n"
+    "fmla v21.4s, v23.4s, v10.4s\n"
+    "str q17, [x23, x27]\n"
+    "fmla v22.4s, v16.4s, v4.4s\n"
+    "str q19, [x25, x26]\n"
+    "add x23, x23, #16\n"
+    "fmla v21.4s, v24.4s, v5.4s\n"
+    "str q22, [x24, x27]\n"
+    "add x24, x24, #16\n"
+    "fmla v21.4s, v16.4s, v7.4s\n"
+    "fmla v21.4s, v13.4s, v4.4s\n"
+    "str q21, [x25, x27]\n"
+    "add x25, x25, #16\n"
+    "4:\n"
+    "cbz x13, 7f\n"
+    "ldr s14, [%[wbptr]]\n"
+    "mov v17.16b, v14.16b\n"
+    "ldr s12, [%[wbptr], #4]\n"
+    "mov v23.16b, v14.16b\n"
+    "ldr s11, [%[wbptr], #8]\n"
+    "mov v24.16b, v14.16b\n"
+    "ldr s10, [%[wbptr], #12]\n"
+    "mov v20.16b, v14.16b\n"
+    "ldr s9, [%[wbptr], #16]\n"
+    "mov v16.16b, v14.16b\n"
+    "ldr s8, [%[wbptr], #20]\n"
+    "mov v13.16b, v14.16b\n"
+    "ldr s7, [%[wbptr], #24]\n"
+    "mov v0.16b, v14.16b\n"
+    "ldr s6, [%[wbptr], #28]\n"
+    "mov v1.16b, v14.16b\n"
+    "ldr s5, [%[wbptr], #32]\n"
+    "mov v2.16b, v14.16b\n"
+    "ldr s4, [%[wbptr], #36]\n"
+    "mov v3.16b, v14.16b\n"
+    "ldr s29, [%[inptr0]]\n"
+    "fmla v17.4s, v29.4s, v12.4s\n"
+    "ldr s28, [x8]\n"
+    "ldr s30, [%[inptr0], %[input_col_stride1]]\n"
+    "subs x13, x13, #1\n"
+    "ldr s25, [x9]\n"
+    "ldr s26, [x8, %[input_col_stride1]]\n"
+    "ldr s27, [%[inptr0], x15]\n"
+    "ldr s15, [x10]\n"
+    "ldr s18, [x9, %[input_col_stride1]]\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x8, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "prfm pldl1keep, [x8, x28]\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "prfm pldl1keep, [x9, x28]\n"
+    "beq 6f\n"
+    "5:\n"
+    "fmla v17.4s, v28.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x16]\n"
+    "fmla v23.4s, v28.4s, v12.4s\n"
+    "ldr s22, [x8, x15]\n"
+    "fmla v24.4s, v30.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v17.4s, v30.4s, v11.4s\n"
+    "ldr s29, [%[inptr0], x17]\n"
+    "fmla v23.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x11, #64]\n"
+    "fmla v20.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x10, x28]\n"
+    "fmla v17.4s, v25.4s, v6.4s\n"
+    "ldr s25, [x11]\n"
+    "fmla v23.4s, v26.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x18]\n"
+    "fmla v17.4s, v26.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v16.4s, v26.4s, v12.4s\n"
+    "ldr s28, [x10, %[input_col_stride1]]\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, #64]\n"
+    "fmla v17.4s, v27.4s, v10.4s\n"
+    "prfm pldl1keep, [x11, x28]\n"
+    "fmla v13.4s, v27.4s, v12.4s\n"
+    "ldr s19, [x9, x15]\n"
+    "fmla v23.4s, v15.4s, v6.4s\n"
+    "prfm pldl1keep, [x10, x16]\n"
+    "fmla v20.4s, v15.4s, v9.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v0.4s, v15.4s, v12.4s\n"
+    "ldr s21, [x8, x17]\n"
+    "fmla v17.4s, v18.4s, v5.4s\n"
+    "prfm pldl1keep, [x8, x20]\n"
+    "fmla v23.4s, v18.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "fmla v24.4s, v18.4s, v6.4s\n"
+    "prfm pldl1keep, [x12, x28]\n"
+    "fmla v20.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x16]\n"
+    "fmla v16.4s, v18.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x18]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "ldr s27, [%[inptr0], x19]\n"
+    "fmla v17.4s, v22.4s, v7.4s\n"
+    "prfm pldl1keep, [x9, x20]\n"
+    "fmla v23.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x8, x22]\n"
+    "fmla v24.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x16]\n"
+    "fmla v16.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x18]\n"
+    "fmla v13.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x20]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "ldr s18, [x12]\n"
+    "fmla v24.4s, v29.4s, v10.4s\n"
+    "prfm pldl1keep, [x9, x22]\n"
+    "fmla v13.4s, v29.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x18]\n"
+    "fmla v3.4s, v29.4s, v12.4s\n"
+    "ldr s22, [x11, %[input_col_stride1]]\n"
+    "fmla v20.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x20]\n"
+    "fmla v0.4s, v25.4s, v9.4s\n"
+    "ldr s25, [x10, x15]\n"
+    "fmla v23.4s, v28.4s, v5.4s\n"
+    "prfm pldl1keep, [x10, x22]\n"
+    "fmla v20.4s, v28.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x20]\n"
+    "fmla v16.4s, v28.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x22]\n"
+    "fmla v0.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x22]\n"
+    "fmla v1.4s, v28.4s, v9.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v17.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v23.4s, v19.4s, v7.4s\n"
+    "subs x13, x13, #1\n"
+    "fmla v24.4s, v19.4s, v5.4s\n"
+    "fmla v20.4s, v19.4s, v10.4s\n"
+    "str s17, [%[outptr0]]\n"
+    "mov v15.16b, v14.16b\n"
+    "fmla v16.4s, v19.4s, v8.4s\n"
+    "fmla v13.4s, v19.4s, v6.4s\n"
+    "fmla v15.4s, v28.4s, v12.4s\n"
+    "ldr s29, [x9, x17]\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "fmla v2.4s, v19.4s, v9.4s\n"
+    "fmla v24.4s, v21.4s, v7.4s\n"
+    "fmla v16.4s, v21.4s, v10.4s\n"
+    "fmla v13.4s, v21.4s, v8.4s\n"
+    "fmla v3.4s, v21.4s, v9.4s\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "fmla v0.4s, v18.4s, v6.4s\n"
+    "mov v18.16b, v14.16b\n"
+    "fmla v20.4s, v22.4s, v5.4s\n"
+    "fmla v13.4s, v27.4s, v10.4s\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "mov v17.16b, v14.16b\n"
+    "fmla v18.4s, v19.4s, v12.4s\n"
+    "mov v19.16b, v14.16b\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "fmla v17.4s, v21.4s, v12.4s\n"
+    "ldr s26, [x8, x19]\n"
+    "fmla v1.4s, v22.4s, v6.4s\n"
+    "fmla v15.4s, v22.4s, v9.4s\n"
+    "mov v22.16b, v14.16b\n"
+    "mov v21.16b, v14.16b\n"
+    "fmla v23.4s, v25.4s, v4.4s\n"
+    "fmla v20.4s, v25.4s, v7.4s\n"
+    "fmla v16.4s, v25.4s, v5.4s\n"
+    "fmla v0.4s, v25.4s, v10.4s\n"
+    "fmla v1.4s, v25.4s, v8.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "str s23, [x23]\n"
+    "fmla v15.4s, v25.4s, v11.4s\n"
+    "fmla v18.4s, v25.4s, v9.4s\n"
+    "ldr s28, [%[inptr0], x21]\n"
+    "fmla v19.4s, v25.4s, v12.4s\n"
+    "ldr s30, [x12, %[input_col_stride1]]\n"
+    "fmla v24.4s, v29.4s, v4.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v16.4s, v29.4s, v7.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "prfm pldl1keep, [%[inptr0], x28]\n"
+    "str s24, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v1.4s, v29.4s, v10.4s\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "ldr s27, [x11, x15]\n"
+    "fmla v3.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v18.4s, v29.4s, v11.4s\n"
+    "fmla v17.4s, v29.4s, v9.4s\n"
+    "fmla v22.4s, v29.4s, v12.4s\n"
+    "ldr s23, [x10, x17]\n"
+    "fmla v13.4s, v26.4s, v7.4s\n"
+    "fmla v2.4s, v26.4s, v10.4s\n"
+    "fmla v3.4s, v26.4s, v8.4s\n"
+    "fmla v17.4s, v26.4s, v11.4s\n"
+    "fmla v0.4s, v30.4s, v5.4s\n"
+    "ldr s24, [x9, x19]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "ldr s29, [x8, x21]\n"
+    "fmla v3.4s, v28.4s, v10.4s\n"
+    "ldr s14, [x12, x15]\n"
+    "fmla v20.4s, v27.4s, v4.4s\n"
+    "add x8, x8, #4\n"
+    "fmla v0.4s, v27.4s, v7.4s\n"
+    "prfm pldl1keep, [x8, #64]\n"
+    "fmla v1.4s, v27.4s, v5.4s\n"
+    "prfm pldl1keep, [x8, x28]\n"
+    "str s20, [x24]\n"
+    "fmla v15.4s, v27.4s, v8.4s\n"
+    "fmla v18.4s, v27.4s, v6.4s\n"
+    "ldr s25, [x11, x17]\n"
+    "fmla v19.4s, v27.4s, v9.4s\n"
+    "ldr s30, [x10, x19]\n"
+    "fmla v16.4s, v23.4s, v4.4s\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "fmla v2.4s, v23.4s, v5.4s\n"
+    "fmla v15.4s, v23.4s, v10.4s\n"
+    "fmla v18.4s, v23.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v6.4s\n"
+    "str s16, [x23, %[output_col_stride1]]\n"
+    "fmla v19.4s, v23.4s, v11.4s\n"
+    "fmla v22.4s, v23.4s, v9.4s\n"
+    "ldr s26, [x9, x21]\n"
+    "fmla v21.4s, v23.4s, v12.4s\n"
+    "ldr s27, [x12, x17]\n"
+    "fmla v13.4s, v24.4s, v4.4s\n"
+    "ldr s20, [x11, x19]\n"
+    "fmla v2.4s, v24.4s, v7.4s\n"
+    "add x9, x9, #4\n"
+    "fmla v3.4s, v24.4s, v5.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "str s13, [%[outptr0], x26]\n"
+    "fmla v18.4s, v24.4s, v10.4s\n"
+    "fmla v17.4s, v24.4s, v8.4s\n"
+    "ldr s23, [x10, x21]\n"
+    "fmla v22.4s, v24.4s, v11.4s\n"
+    "ldr s24, [x12, x19]\n"
+    "fmla v3.4s, v29.4s, v7.4s\n"
+    "prfm pldl1keep, [x9, x28]\n"
+    "fmla v17.4s, v29.4s, v10.4s\n"
+    "ldr s16, [x11, x21]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "add x10, x10, #4\n"
+    "fmla v15.4s, v14.4s, v5.4s\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "fmla v19.4s, v14.4s, v6.4s\n"
+    "ldr s13, [x12, x21]\n"
+    "str s0, [x25]\n"
+    "fmla v1.4s, v25.4s, v4.4s\n"
+    "fmla v15.4s, v25.4s, v7.4s\n"
+    "ldr s14, [%[wbptr]]\n"
+    "fmla v18.4s, v25.4s, v5.4s\n"
+    "add x11, x11, #4\n"
+    "str s1, [x24, %[output_col_stride1]]\n"
+    "fmla v19.4s, v25.4s, v8.4s\n"
+    "fmla v22.4s, v25.4s, v6.4s\n"
+    "ldr s12, [%[wbptr], #4]\n"
+    "fmla v21.4s, v25.4s, v9.4s\n"
+    "ldr s29, [%[inptr0]]\n"
+    "fmla v2.4s, v30.4s, v4.4s\n"
+    "ldr s28, [x8]\n"
+    "fmla v18.4s, v30.4s, v7.4s\n"
+    "add x12, x12, #4\n"
+    "fmla v17.4s, v30.4s, v5.4s\n"
+    "fmla v19.4s, v30.4s, v10.4s\n"
+    "str s2, [x23, x26]\n"
+    "fmla v22.4s, v30.4s, v8.4s\n"
+    "fmla v21.4s, v30.4s, v11.4s\n"
+    "ldr s9, [%[wbptr], #16]\n"
+    "fmla v3.4s, v26.4s, v4.4s\n"
+    "ldr s30, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v17.4s, v26.4s, v7.4s\n"
+    "ldr s25, [x9]\n"
+    "fmla v22.4s, v26.4s, v10.4s\n"
+    "ldr s11, [%[wbptr], #8]\n"
+    "str s3, [%[outptr0], x27]\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v19.4s, v27.4s, v5.4s\n"
+    "ldr s26, [x8, %[input_col_stride1]]\n"
+    "fmla v21.4s, v27.4s, v6.4s\n"
+    "ldr s27, [%[inptr0], x15]\n"
+    "str s15, [x25, %[output_col_stride1]]\n"
+    "fmla v18.4s, v20.4s, v4.4s\n"
+    "fmla v19.4s, v20.4s, v7.4s\n"
+    "ldr s15, [x10]\n"
+    "fmla v22.4s, v20.4s, v5.4s\n"
+    "ldr s6, [%[wbptr], #28]\n"
+    "str s18, [x24, x26]\n"
+    "fmla v21.4s, v20.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "ldr s18, [x9, %[input_col_stride1]]\n"
+    "fmla v22.4s, v23.4s, v7.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v21.4s, v23.4s, v10.4s\n"
+    "ldr s8, [%[wbptr], #20]\n"
+    "str s17, [x23, x27]\n"
+    "fmla v19.4s, v24.4s, v4.4s\n"
+    "fmla v22.4s, v16.4s, v4.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v21.4s, v24.4s, v5.4s\n"
+    "ldr s10, [%[wbptr], #12]\n"
+    "str s19, [x25, x26]\n"
+    "mov v17.16b, v14.16b\n"
+    "str s22, [x24, x27]\n"
+    "mov v23.16b, v14.16b\n"
+    "fmla v21.4s, v16.4s, v7.4s\n"
+    "ldr s5, [%[wbptr], #32]\n"
+    "mov v24.16b, v14.16b\n"
+    "add x24, x24, #4\n"
+    "mov v20.16b, v14.16b\n"
+    "mov v16.16b, v14.16b\n"
+    "fmla v21.4s, v13.4s, v4.4s\n"
+    "ldr s7, [%[wbptr], #24]\n"
+    "mov v13.16b, v14.16b\n"
+    "mov v0.16b, v14.16b\n"
+    "mov v1.16b, v14.16b\n"
+    "mov v2.16b, v14.16b\n"
+    "str s21, [x25, x27]\n"
+    "mov v3.16b, v14.16b\n"
+    "ldr s4, [%[wbptr], #36]\n"
+    "add x25, x25, #4\n"
+    "fmla v17.4s, v29.4s, v12.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "fmla v17.4s, v28.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x16]\n"
+    "fmla v23.4s, v28.4s, v12.4s\n"
+    "ldr s22, [x8, x15]\n"
+    "fmla v24.4s, v30.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v17.4s, v30.4s, v11.4s\n"
+    "ldr s29, [%[inptr0], x17]\n"
+    "fmla v23.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x11, #64]\n"
+    "fmla v20.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x10, x28]\n"
+    "fmla v17.4s, v25.4s, v6.4s\n"
+    "ldr s25, [x11]\n"
+    "fmla v23.4s, v26.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "prfm pldl1keep, [x8, x18]\n"
+    "fmla v17.4s, v26.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x20]\n"
+    "fmla v16.4s, v26.4s, v12.4s\n"
+    "ldr s28, [x10, %[input_col_stride1]]\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, #64]\n"
+    "fmla v17.4s, v27.4s, v10.4s\n"
+    "prfm pldl1keep, [x11, x28]\n"
+    "fmla v13.4s, v27.4s, v12.4s\n"
+    "ldr s19, [x9, x15]\n"
+    "fmla v23.4s, v15.4s, v6.4s\n"
+    "prfm pldl1keep, [x10, x16]\n"
+    "fmla v20.4s, v15.4s, v9.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v0.4s, v15.4s, v12.4s\n"
+    "ldr s21, [x8, x17]\n"
+    "fmla v17.4s, v18.4s, v5.4s\n"
+    "prfm pldl1keep, [x8, x20]\n"
+    "fmla v23.4s, v18.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x22]\n"
+    "fmla v24.4s, v18.4s, v6.4s\n"
+    "prfm pldl1keep, [x12, x28]\n"
+    "fmla v20.4s, v18.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x16]\n"
+    "fmla v16.4s, v18.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x18]\n"
+    "fmla v1.4s, v18.4s, v12.4s\n"
+    "ldr s27, [%[inptr0], x19]\n"
+    "fmla v17.4s, v22.4s, v7.4s\n"
+    "prfm pldl1keep, [x9, x20]\n"
+    "fmla v23.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x8, x22]\n"
+    "fmla v24.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x16]\n"
+    "fmla v16.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x11, x18]\n"
+    "fmla v13.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x20]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "ldr s18, [x12]\n"
+    "fmla v24.4s, v29.4s, v10.4s\n"
+    "prfm pldl1keep, [x9, x22]\n"
+    "fmla v13.4s, v29.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x18]\n"
+    "fmla v3.4s, v29.4s, v12.4s\n"
+    "ldr s22, [x11, %[input_col_stride1]]\n"
+    "fmla v20.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x20]\n"
+    "fmla v0.4s, v25.4s, v9.4s\n"
+    "ldr s25, [x10, x15]\n"
+    "fmla v23.4s, v28.4s, v5.4s\n"
+    "prfm pldl1keep, [x10, x22]\n"
+    "fmla v20.4s, v28.4s, v8.4s\n"
+    "prfm pldl1keep, [x12, x20]\n"
+    "fmla v16.4s, v28.4s, v6.4s\n"
+    "prfm pldl1keep, [x11, x22]\n"
+    "fmla v0.4s, v28.4s, v11.4s\n"
+    "prfm pldl1keep, [x12, x22]\n"
+    "fmla v1.4s, v28.4s, v9.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v17.4s, v19.4s, v4.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v23.4s, v19.4s, v7.4s\n"
+    "fmla v24.4s, v19.4s, v5.4s\n"
+    "fmla v20.4s, v19.4s, v10.4s\n"
+    "fmla v16.4s, v19.4s, v8.4s\n"
+    "str s17, [%[outptr0]]\n"
+    "mov v15.16b, v14.16b\n"
+    "fmla v13.4s, v19.4s, v6.4s\n"
+    "fmla v1.4s, v19.4s, v11.4s\n"
+    "fmla v15.4s, v28.4s, v12.4s\n"
+    "ldr s29, [x9, x17]\n"
+    "fmla v2.4s, v19.4s, v9.4s\n"
+    "fmla v24.4s, v21.4s, v7.4s\n"
+    "fmla v16.4s, v21.4s, v10.4s\n"
+    "fmla v13.4s, v21.4s, v8.4s\n"
+    "fmla v3.4s, v21.4s, v9.4s\n"
+    "fmla v0.4s, v18.4s, v6.4s\n"
+    "mov v18.16b, v14.16b\n"
+    "fmla v2.4s, v21.4s, v11.4s\n"
+    "fmla v13.4s, v27.4s, v10.4s\n"
+    "fmla v20.4s, v22.4s, v5.4s\n"
+    "fmla v18.4s, v19.4s, v12.4s\n"
+    "ldr s26, [x8, x19]\n"
+    "fmla v3.4s, v27.4s, v11.4s\n"
+    "ldr s28, [%[inptr0], x21]\n"
+    "fmla v0.4s, v22.4s, v8.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v1.4s, v22.4s, v6.4s\n"
+    "fmla v15.4s, v22.4s, v9.4s\n"
+    "mov v17.16b, v14.16b\n"
+    "fmla v23.4s, v25.4s, v4.4s\n"
+    "fmla v20.4s, v25.4s, v7.4s\n"
+    "fmla v16.4s, v25.4s, v5.4s\n"
+    "fmla v17.4s, v21.4s, v12.4s\n"
+    "ldr s30, [x12, %[input_col_stride1]]\n"
+    "str s23, [x23]\n"
+    "mov v19.16b, v14.16b\n"
+    "fmla v0.4s, v25.4s, v10.4s\n"
+    "fmla v1.4s, v25.4s, v8.4s\n"
+    "fmla v2.4s, v25.4s, v6.4s\n"
+    "fmla v15.4s, v25.4s, v11.4s\n"
+    "fmla v18.4s, v25.4s, v9.4s\n"
+    "fmla v19.4s, v25.4s, v12.4s\n"
+    "mov v22.16b, v14.16b\n"
+    "mov v21.16b, v14.16b\n"
+    "fmla v24.4s, v29.4s, v4.4s\n"
+    "fmla v16.4s, v29.4s, v7.4s\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "fmla v1.4s, v29.4s, v10.4s\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "fmla v3.4s, v29.4s, v6.4s\n"
+    "str s24, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v18.4s, v29.4s, v11.4s\n"
+    "fmla v17.4s, v29.4s, v9.4s\n"
+    "ldr s27, [x11, x15]\n"
+    "fmla v22.4s, v29.4s, v12.4s\n"
+    "ldr s23, [x10, x17]\n"
+    "fmla v13.4s, v26.4s, v7.4s\n"
+    "fmla v2.4s, v26.4s, v10.4s\n"
+    "fmla v3.4s, v26.4s, v8.4s\n"
+    "fmla v17.4s, v26.4s, v11.4s\n"
+    "fmla v0.4s, v30.4s, v5.4s\n"
+    "ldr s24, [x9, x19]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "ldr s29, [x8, x21]\n"
+    "fmla v3.4s, v28.4s, v10.4s\n"
+    "ldr s14, [x12, x15]\n"
+    "fmla v20.4s, v27.4s, v4.4s\n"
+    "add x8, x8, #4\n"
+    "fmla v0.4s, v27.4s, v7.4s\n"
+    "fmla v1.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v27.4s, v8.4s\n"
+    "fmla v18.4s, v27.4s, v6.4s\n"
+    "str s20, [x24]\n"
+    "fmla v19.4s, v27.4s, v9.4s\n"
+    "fmla v16.4s, v23.4s, v4.4s\n"
+    "ldr s25, [x11, x17]\n"
+    "fmla v1.4s, v23.4s, v7.4s\n"
+    "ldr s30, [x10, x19]\n"
+    "fmla v2.4s, v23.4s, v5.4s\n"
+    "fmla v15.4s, v23.4s, v10.4s\n"
+    "str s16, [x23, %[output_col_stride1]]\n"
+    "fmla v18.4s, v23.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v6.4s\n"
+    "ldr s26, [x9, x21]\n"
+    "fmla v19.4s, v23.4s, v11.4s\n"
+    "add x9, x9, #4\n"
+    "fmla v22.4s, v23.4s, v9.4s\n"
+    "fmla v21.4s, v23.4s, v12.4s\n"
+    "fmla v13.4s, v24.4s, v4.4s\n"
+    "ldr s27, [x12, x17]\n"
+    "fmla v2.4s, v24.4s, v7.4s\n"
+    "ldr s20, [x11, x19]\n"
+    "fmla v3.4s, v24.4s, v5.4s\n"
+    "fmla v18.4s, v24.4s, v10.4s\n"
+    "str s13, [%[outptr0], x26]\n"
+    "fmla v17.4s, v24.4s, v8.4s\n"
+    "fmla v22.4s, v24.4s, v11.4s\n"
+    "ldr s23, [x10, x21]\n"
+    "fmla v3.4s, v29.4s, v7.4s\n"
+    "ldr s24, [x12, x19]\n"
+    "fmla v17.4s, v29.4s, v10.4s\n"
+    "ldr s16, [x11, x21]\n"
+    "fmla v0.4s, v14.4s, v4.4s\n"
+    "add x10, x10, #4\n"
+    "fmla v15.4s, v14.4s, v5.4s\n"
+    "add x11, x11, #4\n"
+    "fmla v19.4s, v14.4s, v6.4s\n"
+    "ldr s13, [x12, x21]\n"
+    "str s0, [x25]\n"
+    "fmla v1.4s, v25.4s, v4.4s\n"
+    "fmla v15.4s, v25.4s, v7.4s\n"
+    "add x12, x12, #4\n"
+    "fmla v18.4s, v25.4s, v5.4s\n"
+    "fmla v19.4s, v25.4s, v8.4s\n"
+    "str s1, [x24, %[output_col_stride1]]\n"
+    "fmla v22.4s, v25.4s, v6.4s\n"
+    "fmla v21.4s, v25.4s, v9.4s\n"
+    "fmla v2.4s, v30.4s, v4.4s\n"
+    "fmla v18.4s, v30.4s, v7.4s\n"
+    "fmla v17.4s, v30.4s, v5.4s\n"
+    "fmla v19.4s, v30.4s, v10.4s\n"
+    "fmla v22.4s, v30.4s, v8.4s\n"
+    "str s2, [x23, x26]\n"
+    "fmla v21.4s, v30.4s, v11.4s\n"
+    "fmla v3.4s, v26.4s, v4.4s\n"
+    "fmla v17.4s, v26.4s, v7.4s\n"
+    "fmla v22.4s, v26.4s, v10.4s\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v19.4s, v27.4s, v5.4s\n"
+    "fmla v21.4s, v27.4s, v6.4s\n"
+    "str s3, [%[outptr0], x27]\n"
+    "fmla v18.4s, v20.4s, v4.4s\n"
+    "str s15, [x25, %[output_col_stride1]]\n"
+    "fmla v22.4s, v20.4s, v5.4s\n"
+    "fmla v19.4s, v20.4s, v7.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "str s18, [x24, x26]\n"
+    "fmla v21.4s, v20.4s, v8.4s\n"
+    "fmla v17.4s, v23.4s, v4.4s\n"
+    "fmla v22.4s, v23.4s, v7.4s\n"
+    "fmla v19.4s, v24.4s, v4.4s\n"
+    "fmla v21.4s, v23.4s, v10.4s\n"
+    "str s17, [x23, x27]\n"
+    "fmla v22.4s, v16.4s, v4.4s\n"
+    "str s19, [x25, x26]\n"
+    "add x23, x23, #4\n"
+    "fmla v21.4s, v24.4s, v5.4s\n"
+    "str s22, [x24, x27]\n"
+    "add x24, x24, #4\n"
+    "fmla v21.4s, v16.4s, v7.4s\n"
+    "fmla v21.4s, v13.4s, v4.4s\n"
+    "str s21, [x25, x27]\n"
+    "add x25, x25, #4\n"
+    "7:\n"
+    : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels)
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x8", "x9", "memory"
   );
+}
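+
+// A minimal scalar reference for what each execute_tile specialisation here
+// computes (an illustrative sketch, not called by the library): one 4x4
+// output tile of a 3x3, stride-1 depthwise convolution read from a 6x6
+// input tile, channels innermost. For readability it assumes the simple
+// per-channel packing used by the scalar tails above, i.e. ten consecutive
+// floats per channel at weight_bias_ptr (bias, then w00..w22 row-major);
+// the vector loops instead interleave those ten values across groups of
+// four channels. Strides are in elements here, whereas the assembly
+// receives them pre-scaled to bytes.
+inline void execute_tile_reference(
+  int n_channels,
+  const float *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride,
+  const bool fuse_relu)
+{
+  for(int c = 0; c < n_channels; c++)
+  {
+    const float *wb = weight_bias_ptr + 10 * c; // [bias, w00, w01, ..., w22]
+    for(int oi = 0; oi < 4; oi++)
+    {
+      for(int oj = 0; oj < 4; oj++)
+      {
+        float acc = wb[0]; // start from the bias
+        for(int ki = 0; ki < 3; ki++)
+        {
+          for(int kj = 0; kj < 3; kj++)
+          {
+            acc += wb[1 + 3 * ki + kj] * input[(oi + ki) * input_row_stride + (oj + kj) * input_col_stride + c];
+          }
+        }
+        output[oi * output_row_stride + oj * output_col_stride + c] = fuse_relu ? ((acc > 0.0f) ? acc : 0.0f) : acc;
+      }
+    }
+  }
+}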
 
-  int channels_remaining = n_channels;
-  if (channels_remaining >= 4 && same_strides)
-  {
-    int c4_rem = channels_remaining / 4;
-    channels_remaining %= 4;
-    const int prefetch_depth = 8;
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
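+  // Structurally identical to the preceding specialisation (a 4x4 output
+  // tile of the 3x3, stride-1 depthwise convolution), but with the ReLU
+  // fused in: every accumulator is clamped against a zeroed vector (fmax)
+  // just before it is stored. x22 counts the 4-channel vector iterations
+  // (n_channels >> 2) and x21 the scalar remainder (n_channels & 3).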
+  __asm __volatile(
+    "add x9, %[inptr0], %[input_row_stride]\n"
+    "add x28, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x16, %[outptr0], %[output_row_stride]\n"
+    "add x24, x9, %[input_row_stride]\n"
+    "add x25, x28, #64\n"
+    "add x23, x28, %[input_col_stride1]\n"
+    "add x26, x24, %[input_row_stride]\n"
+    "add x11, x23, #64\n"
+    "add x12, x23, %[input_col_stride1]\n"
+    "add x10, x26, %[input_row_stride]\n"
+    "add x13, x12, #64\n"
+    "add x14, x12, %[input_col_stride1]\n"
+    "add x27, x10, %[input_row_stride]\n"
+    "add x15, x14, #64\n"
+    "add x17, x16, %[output_row_stride]\n"
+    "add x18, x17, %[output_row_stride]\n"
+    "add x19, %[output_col_stride1], %[output_col_stride1]\n"
+    "and x21, %[n_channels], #3\n"
+    "add x20, x19, %[output_col_stride1]\n"
+    "lsr x22, %[n_channels], #2\n"
+    "cbz x22, 4f\n"
+    "1:\n"
+    "ldr q21, [%[wbptr]]\n"
+    "subs x22, x22, #1\n"
+    "mov v7.16b, v21.16b\n"
+    "ldr q20, [%[wbptr], #16]\n"
+    "mov v3.16b, v21.16b\n"
+    "ldr q14, [%[wbptr], #32]\n"
+    "mov v6.16b, v21.16b\n"
+    "ldr q13, [%[wbptr], #48]\n"
+    "mov v15.16b, v21.16b\n"
+    "ldr q17, [%[wbptr], #64]\n"
+    "mov v2.16b, v21.16b\n"
+    "ldr q12, [%[wbptr], #80]\n"
+    "mov v5.16b, v21.16b\n"
+    "ldr q11, [%[wbptr], #96]\n"
+    "mov v0.16b, v21.16b\n"
+    "ldr q10, [%[wbptr], #112]\n"
+    "mov v16.16b, v21.16b\n"
+    "ldr q9, [%[wbptr], #128]\n"
+    "mov v1.16b, v21.16b\n"
+    "ldr q8, [%[wbptr], #144]\n"
+    "mov v4.16b, v21.16b\n"
+    "ldr q22, [%[inptr0]]\n"
+    "fmla v7.4s, v22.4s, v20.4s\n"
+    "ldr q19, [x9]\n"
+    "fmla v3.4s, v19.4s, v20.4s\n"
+    "ldr q23, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v6.4s, v23.4s, v20.4s\n"
+    "ldr q18, [x24]\n"
+    "fmla v7.4s, v19.4s, v17.4s\n"
+    "ldr q27, [x9, %[input_col_stride1]]\n"
+    "fmla v3.4s, v18.4s, v17.4s\n"
+    "ldr q28, [%[inptr0], x28]\n"
+    "fmla v15.4s, v18.4s, v20.4s\n"
+    "ldr q25, [x26]\n"
+    "fmla v7.4s, v23.4s, v14.4s\n"
+    "ldr q22, [x24, %[input_col_stride1]]\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x8]\n"
+    "fmla v7.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "prfm pldl1keep, [x9, x8]\n"
+    "prfm pldl1keep, [%[inptr0], x25]\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "prfm pldl1keep, [x24, x8]\n"
+    "fmla v7.4s, v27.4s, v12.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "mov v18.16b, v21.16b\n"
+    "ldr q23, [x9, x28]\n"
+    "mov v19.16b, v21.16b\n"
+    "prfm pldl1keep, [x9, x25]\n"
+    "fmla v6.4s, v27.4s, v17.4s\n"
+    "prfm pldl1keep, [%[inptr0], x11]\n"
+    "fmla v2.4s, v27.4s, v20.4s\n"
+    "ldr q24, [%[inptr0], x23]\n"
+    "fmla v7.4s, v28.4s, v13.4s\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "fmla v6.4s, v28.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x8]\n"
+    "fmla v5.4s, v28.4s, v20.4s\n"
+    "ldr q26, [x10]\n"
+    "fmla v3.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x25]\n"
+    "fmla v15.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x9, x11]\n"
+    "fmla v0.4s, v25.4s, v20.4s\n"
+    "ldr q25, [x26, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x13]\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, #64]\n"
+    "fmla v6.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x10, x8]\n"
+    "fmla v15.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x25]\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "prfm pldl1keep, [x24, x11]\n"
+    "fmla v16.4s, v22.4s, v20.4s\n"
+    "ldr q22, [x24, x28]\n"
+    "fmla v7.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x13]\n"
+    "fmla v3.4s, v23.4s, v13.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v6.4s, v23.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, x8]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "prfm pldl1keep, [x10, x25]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "prfm pldl1keep, [x26, x11]\n"
+    "fmla v1.4s, v23.4s, v20.4s\n"
+    "ldr q23, [x9, x23]\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x24, x13]\n"
+    "fmla v5.4s, v24.4s, v14.4s\n"
+    "prfm pldl1keep, [x9, x15]\n"
+    "fmla v4.4s, v24.4s, v20.4s\n"
+    "ldr q24, [%[inptr0], x12]\n"
+    "fmla v15.4s, v26.4s, v10.4s\n"
+    "prfm pldl1keep, [x27, x25]\n"
+    "fmla v0.4s, v26.4s, v17.4s\n"
+    "ldr q29, [x27]\n"
+    "fmla v3.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x11]\n"
+    "fmla v15.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x26, x13]\n"
+    "fmla v2.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "prfm pldl1keep, [x27, x11]\n"
+    "fmla v16.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x10, x13]\n"
+    "fmla v18.4s, v25.4s, v20.4s\n"
+    "ldr q26, [x10, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x26, x15]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x27, x13]\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x15]\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x27, x15]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v5.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v16.4s, v22.4s, v14.4s\n"
+    "subs x22, x22, #1\n"
+    "fmla v1.4s, v22.4s, v17.4s\n"
+    "fmla v19.4s, v22.4s, v20.4s\n"
+    "mov v22.16b, v21.16b\n"
+    "fmla v6.4s, v23.4s, v11.4s\n"
+    "fmla v2.4s, v23.4s, v13.4s\n"
+    "fmla v5.4s, v23.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v14.4s\n"
+    "fmla v4.4s, v23.4s, v17.4s\n"
+    "fmla v22.4s, v23.4s, v20.4s\n"
+    "ldr q27, [x26, x28]\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "fmla v0.4s, v29.4s, v10.4s\n"
+    "mov v23.16b, v21.16b\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "mov v25.16b, v21.16b\n"
+    "mov v24.16b, v21.16b\n"
+    "fmla v15.4s, v26.4s, v9.4s\n"
+    "fmla v0.4s, v26.4s, v12.4s\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "fmla v18.4s, v26.4s, v17.4s\n"
+    "fmla v3.4s, v27.4s, v8.4s\n"
+    "ldr q29, [x24, x23]\n"
+    "fmla v15.4s, v27.4s, v11.4s\n"
+    "fmla v2.4s, v27.4s, v9.4s\n"
+    "fmla v0.4s, v27.4s, v13.4s\n"
+    "fmla v16.4s, v27.4s, v12.4s\n"
+    "fmla v1.4s, v27.4s, v10.4s\n"
+    "fmla v18.4s, v27.4s, v14.4s\n"
+    "fmla v19.4s, v27.4s, v17.4s\n"
+    "fmla v23.4s, v27.4s, v20.4s\n"
+    "fmla v6.4s, v29.4s, v8.4s\n"
+    "ldr q28, [x9, x12]\n"
+    "fmla v2.4s, v29.4s, v11.4s\n"
+    "fmla v5.4s, v29.4s, v9.4s\n"
+    "fmla v16.4s, v29.4s, v13.4s\n"
+    "fmla v1.4s, v29.4s, v12.4s\n"
+    "fmla v4.4s, v29.4s, v10.4s\n"
+    "fmla v19.4s, v29.4s, v14.4s\n"
+    "fmla v22.4s, v29.4s, v17.4s\n"
+    "fmla v25.4s, v29.4s, v20.4s\n"
+    "fmla v5.4s, v28.4s, v11.4s\n"
+    "ldr q21, [%[inptr0], x14]\n"
+    "fmla v1.4s, v28.4s, v13.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v22.4s, v28.4s, v14.4s\n"
+    "ldr q26, [x27, %[input_col_stride1]]\n"
+    "fmla v0.4s, v26.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x8]\n"
+    "fmla v4.4s, v21.4s, v13.4s\n"
+    "ldr q21, [x10, x28]\n"
+    "fmla v18.4s, v26.4s, v10.4s\n"
+    "ldr q29, [x26, x23]\n"
+    "fmla v15.4s, v21.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x25]\n"
+    "fmla v0.4s, v21.4s, v11.4s\n"
+    "fmla v16.4s, v21.4s, v9.4s\n"
+    "fmla v18.4s, v21.4s, v12.4s\n"
+    "fmla v19.4s, v21.4s, v10.4s\n"
+    "fmla v23.4s, v21.4s, v17.4s\n"
+    "ldr q21, [x24, x12]\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "fmla v16.4s, v29.4s, v11.4s\n"
+    "fmla v1.4s, v29.4s, v9.4s\n"
+    "fmla v18.4s, v29.4s, v13.4s\n"
+    "fmla v19.4s, v29.4s, v12.4s\n"
+    "fmla v22.4s, v29.4s, v10.4s\n"
+    "fmla v23.4s, v29.4s, v14.4s\n"
+    "fmla v25.4s, v29.4s, v17.4s\n"
+    "fmla v24.4s, v29.4s, v20.4s\n"
+    "ldr q28, [x9, x14]\n"
+    "fmla v5.4s, v21.4s, v8.4s\n"
+    "ldr q27, [x27, x28]\n"
+    "fmla v1.4s, v21.4s, v11.4s\n"
+    "add x9, x9, #16\n"
+    "fmla v4.4s, v21.4s, v9.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "fmla v19.4s, v21.4s, v13.4s\n"
+    "prfm pldl1keep, [x9, x8]\n"
+    "fmla v22.4s, v21.4s, v12.4s\n"
+    "fmla v25.4s, v21.4s, v14.4s\n"
+    "fmla v4.4s, v28.4s, v11.4s\n"
+    "ldr q20, [x10, x23]\n"
+    "fmla v0.4s, v27.4s, v8.4s\n"
+    "fmla v18.4s, v27.4s, v9.4s\n"
+    "fmla v22.4s, v28.4s, v13.4s\n"
+    "ldr q26, [x26, x12]\n"
+    "fmla v23.4s, v27.4s, v10.4s\n"
+    "ldr q21, [x24, x14]\n"
+    "fmla v16.4s, v20.4s, v8.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v18.4s, v20.4s, v11.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v19.4s, v20.4s, v9.4s\n"
+    "prfm pldl1keep, [x24, x8]\n"
+    "fmla v23.4s, v20.4s, v12.4s\n"
+    "fmla v25.4s, v20.4s, v10.4s\n"
+    "fmla v24.4s, v20.4s, v17.4s\n"
+    "ldr q28, [x27, x23]\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "ldr q20, [x10, x12]\n"
+    "fmla v19.4s, v26.4s, v11.4s\n"
+    "fmla v22.4s, v26.4s, v9.4s\n"
+    "fmla v23.4s, v26.4s, v13.4s\n"
+    "fmla v25.4s, v26.4s, v12.4s\n"
+    "fmla v24.4s, v26.4s, v14.4s\n"
+    "ldr q17, [x26, x14]\n"
+    "fmla v4.4s, v21.4s, v8.4s\n"
+    "ldr q26, [x27, x12]\n"
+    "fmla v22.4s, v21.4s, v11.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v25.4s, v21.4s, v13.4s\n"
+    "ldr q27, [x10, x14]\n"
+    "fmla v18.4s, v28.4s, v8.4s\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "fmla v23.4s, v28.4s, v9.4s\n"
+    "add x10, x10, #16\n"
+    "fmla v24.4s, v28.4s, v10.4s\n"
+    "ldr q28, [x27, x14]\n"
+    "fmla v19.4s, v20.4s, v8.4s\n"
+    "ldr q21, [%[wbptr]]\n"
+    "fmla v23.4s, v20.4s, v11.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v25.4s, v20.4s, v9.4s\n"
+    "fmla v24.4s, v20.4s, v12.4s\n"
+    "fmla v22.4s, v17.4s, v8.4s\n"
+    "ldr q20, [%[wbptr], #16]\n"
+    "fmla v23.4s, v26.4s, v8.4s\n"
+    "ldr q14, [%[wbptr], #32]\n"
+    "fmla v24.4s, v17.4s, v13.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v25.4s, v17.4s, v11.4s\n"
+    "ldr q17, [%[wbptr], #64]\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "ldr q13, [%[wbptr], #48]\n"
+    "str q7, [%[outptr0]]\n"
+    "fmla v25.4s, v27.4s, v8.4s\n"
+    "str q6, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "ldr q12, [%[wbptr], #80]\n"
+    "str q5, [%[outptr0], x19]\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "ldr q10, [%[wbptr], #112]\n"
+    "str q4, [%[outptr0], x20]\n"
+    "fmla v24.4s, v28.4s, v8.4s\n"
+    "str q3, [x16]\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "ldr q11, [%[wbptr], #96]\n"
+    "str q2, [x16, %[output_col_stride1]]\n"
+    "fmax v22.4s, v22.4s, v29.4s\n"
+    "str q1, [x16, x19]\n"
+    "fmax v15.4s, v15.4s, v29.4s\n"
+    "str q22, [x16, x20]\n"
+    "fmax v16.4s, v16.4s, v29.4s\n"
+    "str q15, [x17]\n"
+    "fmax v19.4s, v19.4s, v29.4s\n"
+    "str q16, [x17, %[output_col_stride1]]\n"
+    "fmax v25.4s, v25.4s, v29.4s\n"
+    "str q19, [x17, x19]\n"
+    "fmax v0.4s, v0.4s, v29.4s\n"
+    "str q25, [x17, x20]\n"
+    "fmax v18.4s, v18.4s, v29.4s\n"
+    "str q0, [x18]\n"
+    "fmax v23.4s, v23.4s, v29.4s\n"
+    "str q18, [x18, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v29.4s\n"
+    "str q23, [x18, x19]\n"
+    "mov v7.16b, v21.16b\n"
+    "str q24, [x18, x20]\n"
+    "mov v3.16b, v21.16b\n"
+    "mov v6.16b, v21.16b\n"
+    "ldr q9, [%[wbptr], #128]\n"
+    "mov v15.16b, v21.16b\n"
+    "ldr q8, [%[wbptr], #144]\n"
+    "mov v2.16b, v21.16b\n"
+    "ldr q22, [%[inptr0]]\n"
+    "mov v5.16b, v21.16b\n"
+    "ldr q19, [x9]\n"
+    "mov v0.16b, v21.16b\n"
+    "ldr q23, [%[inptr0], %[input_col_stride1]]\n"
+    "mov v16.16b, v21.16b\n"
+    "ldr q18, [x24]\n"
+    "mov v1.16b, v21.16b\n"
+    "ldr q27, [x9, %[input_col_stride1]]\n"
+    "mov v4.16b, v21.16b\n"
+    "ldr q28, [%[inptr0], x28]\n"
+    "fmla v7.4s, v22.4s, v20.4s\n"
+    "ldr q25, [x26]\n"
+    "fmla v3.4s, v19.4s, v20.4s\n"
+    "ldr q22, [x24, %[input_col_stride1]]\n"
+    "fmla v6.4s, v23.4s, v20.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmla v7.4s, v19.4s, v17.4s\n"
+    "add x16, x16, #16\n"
+    "fmla v3.4s, v18.4s, v17.4s\n"
+    "add x17, x17, #16\n"
+    "fmla v15.4s, v18.4s, v20.4s\n"
+    "add x18, x18, #16\n"
+    "fmla v7.4s, v23.4s, v14.4s\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "fmla v7.4s, v18.4s, v10.4s\n"
+    "fmla v7.4s, v27.4s, v12.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "mov v18.16b, v21.16b\n"
+    "ldr q23, [x9, x28]\n"
+    "mov v19.16b, v21.16b\n"
+    "prfm pldl1keep, [x9, x25]\n"
+    "fmla v6.4s, v27.4s, v17.4s\n"
+    "prfm pldl1keep, [%[inptr0], x11]\n"
+    "fmla v2.4s, v27.4s, v20.4s\n"
+    "ldr q24, [%[inptr0], x23]\n"
+    "fmla v7.4s, v28.4s, v13.4s\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "fmla v6.4s, v28.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x8]\n"
+    "fmla v5.4s, v28.4s, v20.4s\n"
+    "ldr q26, [x10]\n"
+    "fmla v3.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x25]\n"
+    "fmla v15.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x9, x11]\n"
+    "fmla v0.4s, v25.4s, v20.4s\n"
+    "ldr q25, [x26, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x13]\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, #64]\n"
+    "fmla v6.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x10, x8]\n"
+    "fmla v15.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x25]\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "prfm pldl1keep, [x24, x11]\n"
+    "fmla v16.4s, v22.4s, v20.4s\n"
+    "ldr q22, [x24, x28]\n"
+    "fmla v7.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x13]\n"
+    "fmla v3.4s, v23.4s, v13.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v6.4s, v23.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, x8]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "prfm pldl1keep, [x10, x25]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "prfm pldl1keep, [x26, x11]\n"
+    "fmla v1.4s, v23.4s, v20.4s\n"
+    "ldr q23, [x9, x23]\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x24, x13]\n"
+    "fmla v5.4s, v24.4s, v14.4s\n"
+    "prfm pldl1keep, [x9, x15]\n"
+    "fmla v4.4s, v24.4s, v20.4s\n"
+    "ldr q24, [%[inptr0], x12]\n"
+    "fmla v15.4s, v26.4s, v10.4s\n"
+    "prfm pldl1keep, [x27, x25]\n"
+    "fmla v0.4s, v26.4s, v17.4s\n"
+    "ldr q29, [x27]\n"
+    "fmla v3.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x11]\n"
+    "fmla v15.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x26, x13]\n"
+    "fmla v2.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "prfm pldl1keep, [x27, x11]\n"
+    "fmla v16.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x10, x13]\n"
+    "fmla v18.4s, v25.4s, v20.4s\n"
+    "ldr q26, [x10, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x26, x15]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x27, x13]\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x15]\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x27, x15]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v5.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v16.4s, v22.4s, v14.4s\n"
+    "fmla v1.4s, v22.4s, v17.4s\n"
+    "fmla v19.4s, v22.4s, v20.4s\n"
+    "ldr q27, [x26, x28]\n"
+    "fmla v6.4s, v23.4s, v11.4s\n"
+    "fmla v2.4s, v23.4s, v13.4s\n"
+    "fmla v5.4s, v23.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v14.4s\n"
+    "fmla v4.4s, v23.4s, v17.4s\n"
+    "fmla v0.4s, v29.4s, v10.4s\n"
+    "mov v22.16b, v21.16b\n"
+    "fmla v15.4s, v26.4s, v9.4s\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "fmla v22.4s, v23.4s, v20.4s\n"
+    "ldr q29, [x24, x23]\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "ldr q28, [x9, x12]\n"
+    "fmla v0.4s, v26.4s, v12.4s\n"
+    "fmla v18.4s, v26.4s, v17.4s\n"
+    "mov v23.16b, v21.16b\n"
+    "fmla v3.4s, v27.4s, v8.4s\n"
+    "fmla v15.4s, v27.4s, v11.4s\n"
+    "fmla v2.4s, v27.4s, v9.4s\n"
+    "fmla v0.4s, v27.4s, v13.4s\n"
+    "fmla v16.4s, v27.4s, v12.4s\n"
+    "fmla v1.4s, v27.4s, v10.4s\n"
+    "fmla v18.4s, v27.4s, v14.4s\n"
+    "fmla v19.4s, v27.4s, v17.4s\n"
+    "fmla v23.4s, v27.4s, v20.4s\n"
+    "mov v25.16b, v21.16b\n"
+    "mov v24.16b, v21.16b\n"
+    "fmla v6.4s, v29.4s, v8.4s\n"
+    "fmla v2.4s, v29.4s, v11.4s\n"
+    "fmla v5.4s, v29.4s, v9.4s\n"
+    "fmla v16.4s, v29.4s, v13.4s\n"
+    "fmla v1.4s, v29.4s, v12.4s\n"
+    "fmla v4.4s, v29.4s, v10.4s\n"
+    "fmla v19.4s, v29.4s, v14.4s\n"
+    "fmla v22.4s, v29.4s, v17.4s\n"
+    "fmla v25.4s, v29.4s, v20.4s\n"
+    "ldr q21, [%[inptr0], x14]\n"
+    "fmla v5.4s, v28.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v1.4s, v28.4s, v13.4s\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "fmla v22.4s, v28.4s, v14.4s\n"
+    "ldr q26, [x27, %[input_col_stride1]]\n"
+    "fmla v0.4s, v26.4s, v9.4s\n"
+    "fmla v18.4s, v26.4s, v10.4s\n"
+    "fmla v4.4s, v21.4s, v13.4s\n"
+    "ldr q21, [x10, x28]\n"
+    "fmla v15.4s, v21.4s, v8.4s\n"
+    "ldr q29, [x26, x23]\n"
+    "fmla v0.4s, v21.4s, v11.4s\n"
+    "fmla v16.4s, v21.4s, v9.4s\n"
+    "fmla v18.4s, v21.4s, v12.4s\n"
+    "fmla v19.4s, v21.4s, v10.4s\n"
+    "fmla v23.4s, v21.4s, v17.4s\n"
+    "ldr q21, [x24, x12]\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "fmla v16.4s, v29.4s, v11.4s\n"
+    "fmla v1.4s, v29.4s, v9.4s\n"
+    "fmla v18.4s, v29.4s, v13.4s\n"
+    "fmla v19.4s, v29.4s, v12.4s\n"
+    "fmla v22.4s, v29.4s, v10.4s\n"
+    "fmla v23.4s, v29.4s, v14.4s\n"
+    "fmla v25.4s, v29.4s, v17.4s\n"
+    "fmla v24.4s, v29.4s, v20.4s\n"
+    "ldr q28, [x9, x14]\n"
+    "fmla v5.4s, v21.4s, v8.4s\n"
+    "ldr q27, [x27, x28]\n"
+    "fmla v1.4s, v21.4s, v11.4s\n"
+    "add x9, x9, #16\n"
+    "fmla v4.4s, v21.4s, v9.4s\n"
+    "fmla v19.4s, v21.4s, v13.4s\n"
+    "fmla v22.4s, v21.4s, v12.4s\n"
+    "fmla v25.4s, v21.4s, v14.4s\n"
+    "fmla v0.4s, v27.4s, v8.4s\n"
+    "ldr q20, [x10, x23]\n"
+    "fmla v4.4s, v28.4s, v11.4s\n"
+    "fmla v18.4s, v27.4s, v9.4s\n"
+    "fmla v22.4s, v28.4s, v13.4s\n"
+    "ldr q26, [x26, x12]\n"
+    "fmla v23.4s, v27.4s, v10.4s\n"
+    "ldr q21, [x24, x14]\n"
+    "fmla v16.4s, v20.4s, v8.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v18.4s, v20.4s, v11.4s\n"
+    "fmla v19.4s, v20.4s, v9.4s\n"
+    "fmla v23.4s, v20.4s, v12.4s\n"
+    "fmla v25.4s, v20.4s, v10.4s\n"
+    "fmla v24.4s, v20.4s, v17.4s\n"
+    "ldr q28, [x27, x23]\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "ldr q20, [x10, x12]\n"
+    "fmla v19.4s, v26.4s, v11.4s\n"
+    "fmla v22.4s, v26.4s, v9.4s\n"
+    "fmla v23.4s, v26.4s, v13.4s\n"
+    "fmla v25.4s, v26.4s, v12.4s\n"
+    "fmla v24.4s, v26.4s, v14.4s\n"
+    "ldr q17, [x26, x14]\n"
+    "fmla v4.4s, v21.4s, v8.4s\n"
+    "ldr q26, [x27, x12]\n"
+    "fmla v22.4s, v21.4s, v11.4s\n"
+    "add x26, x26, #16\n"
+    "fmla v25.4s, v21.4s, v13.4s\n"
+    "ldr q27, [x10, x14]\n"
+    "fmla v18.4s, v28.4s, v8.4s\n"
+    "add x10, x10, #16\n"
+    "fmla v23.4s, v28.4s, v9.4s\n"
+    "fmla v24.4s, v28.4s, v10.4s\n"
+    "fmla v19.4s, v20.4s, v8.4s\n"
+    "ldr q28, [x27, x14]\n"
+    "fmla v25.4s, v20.4s, v9.4s\n"
+    "add x27, x27, #16\n"
+    "fmla v23.4s, v20.4s, v11.4s\n"
+    "fmla v24.4s, v20.4s, v12.4s\n"
+    "fmla v22.4s, v17.4s, v8.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v25.4s, v17.4s, v11.4s\n"
+    "fmla v24.4s, v17.4s, v13.4s\n"
+    "fmla v23.4s, v26.4s, v8.4s\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmla v25.4s, v27.4s, v8.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "str q7, [%[outptr0]]\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "str q6, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "str q5, [%[outptr0], x19]\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "str q4, [%[outptr0], x20]\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "str q3, [x16]\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "str q2, [x16, %[output_col_stride1]]\n"
+    "fmla v24.4s, v28.4s, v8.4s\n"
+    "str q1, [x16, x19]\n"
+    "fmax v22.4s, v22.4s, v29.4s\n"
+    "fmax v15.4s, v15.4s, v29.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "str q22, [x16, x20]\n"
+    "fmax v16.4s, v16.4s, v29.4s\n"
+    "str q15, [x17]\n"
+    "fmax v19.4s, v19.4s, v29.4s\n"
+    "str q16, [x17, %[output_col_stride1]]\n"
+    "fmax v25.4s, v25.4s, v29.4s\n"
+    "str q19, [x17, x19]\n"
+    "fmax v0.4s, v0.4s, v29.4s\n"
+    "str q25, [x17, x20]\n"
+    "fmax v18.4s, v18.4s, v29.4s\n"
+    "str q0, [x18]\n"
+    "fmax v23.4s, v23.4s, v29.4s\n"
+    "str q18, [x18, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v29.4s\n"
+    "str q23, [x18, x19]\n"
+    "add x16, x16, #16\n"
+    "str q24, [x18, x20]\n"
+    "add x17, x17, #16\n"
+    "add x18, x18, #16\n"
+    "4:\n"
+    "cbz x21, 7f\n"
+    "ldr s21, [%[wbptr]]\n"
+    "mov v7.16b, v21.16b\n"
+    "ldr s20, [%[wbptr], #4]\n"
+    "mov v3.16b, v21.16b\n"
+    "ldr s14, [%[wbptr], #8]\n"
+    "mov v6.16b, v21.16b\n"
+    "ldr s13, [%[wbptr], #12]\n"
+    "mov v15.16b, v21.16b\n"
+    "ldr s17, [%[wbptr], #16]\n"
+    "mov v2.16b, v21.16b\n"
+    "ldr s12, [%[wbptr], #20]\n"
+    "mov v5.16b, v21.16b\n"
+    "ldr s11, [%[wbptr], #24]\n"
+    "mov v0.16b, v21.16b\n"
+    "ldr s10, [%[wbptr], #28]\n"
+    "mov v16.16b, v21.16b\n"
+    "ldr s9, [%[wbptr], #32]\n"
+    "mov v1.16b, v21.16b\n"
+    "ldr s8, [%[wbptr], #36]\n"
+    "mov v4.16b, v21.16b\n"
+    "ldr s22, [%[inptr0]]\n"
+    "fmla v7.4s, v22.4s, v20.4s\n"
+    "ldr s19, [x9]\n"
+    "fmla v3.4s, v19.4s, v20.4s\n"
+    "ldr s23, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v6.4s, v23.4s, v20.4s\n"
+    "ldr s18, [x24]\n"
+    "fmla v7.4s, v19.4s, v17.4s\n"
+    "ldr s27, [x9, %[input_col_stride1]]\n"
+    "fmla v3.4s, v18.4s, v17.4s\n"
+    "ldr s28, [%[inptr0], x28]\n"
+    "fmla v15.4s, v18.4s, v20.4s\n"
+    "ldr s25, [x26]\n"
+    "fmla v7.4s, v23.4s, v14.4s\n"
+    "ldr s22, [x24, %[input_col_stride1]]\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "subs x21, x21, #1\n"
+    "prfm pldl1keep, [%[inptr0], x8]\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v7.4s, v18.4s, v10.4s\n"
+    "prfm pldl1keep, [x9, x8]\n"
+    "prfm pldl1keep, [%[inptr0], x25]\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "prfm pldl1keep, [x24, x8]\n"
+    "fmla v7.4s, v27.4s, v12.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "mov v18.16b, v21.16b\n"
+    "ldr s23, [x9, x28]\n"
+    "mov v19.16b, v21.16b\n"
+    "prfm pldl1keep, [x9, x25]\n"
+    "fmla v6.4s, v27.4s, v17.4s\n"
+    "prfm pldl1keep, [%[inptr0], x11]\n"
+    "fmla v2.4s, v27.4s, v20.4s\n"
+    "ldr s24, [%[inptr0], x23]\n"
+    "fmla v7.4s, v28.4s, v13.4s\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "fmla v6.4s, v28.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x8]\n"
+    "fmla v5.4s, v28.4s, v20.4s\n"
+    "ldr s26, [x10]\n"
+    "fmla v3.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x25]\n"
+    "fmla v15.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x9, x11]\n"
+    "fmla v0.4s, v25.4s, v20.4s\n"
+    "ldr s25, [x26, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x13]\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, #64]\n"
+    "fmla v6.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x10, x8]\n"
+    "fmla v15.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x25]\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "prfm pldl1keep, [x24, x11]\n"
+    "fmla v16.4s, v22.4s, v20.4s\n"
+    "ldr s22, [x24, x28]\n"
+    "fmla v7.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x13]\n"
+    "fmla v3.4s, v23.4s, v13.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v6.4s, v23.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, x8]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "prfm pldl1keep, [x10, x25]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "prfm pldl1keep, [x26, x11]\n"
+    "fmla v1.4s, v23.4s, v20.4s\n"
+    "ldr s23, [x9, x23]\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x24, x13]\n"
+    "fmla v5.4s, v24.4s, v14.4s\n"
+    "prfm pldl1keep, [x9, x15]\n"
+    "fmla v4.4s, v24.4s, v20.4s\n"
+    "ldr s24, [%[inptr0], x12]\n"
+    "fmla v15.4s, v26.4s, v10.4s\n"
+    "prfm pldl1keep, [x27, x25]\n"
+    "fmla v0.4s, v26.4s, v17.4s\n"
+    "ldr s29, [x27]\n"
+    "fmla v3.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x11]\n"
+    "fmla v15.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x26, x13]\n"
+    "fmla v2.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "prfm pldl1keep, [x27, x11]\n"
+    "fmla v16.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x10, x13]\n"
+    "fmla v18.4s, v25.4s, v20.4s\n"
+    "ldr s26, [x10, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x26, x15]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x27, x13]\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x15]\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x27, x15]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v5.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v16.4s, v22.4s, v14.4s\n"
+    "subs x21, x21, #1\n"
+    "fmla v1.4s, v22.4s, v17.4s\n"
+    "fmla v19.4s, v22.4s, v20.4s\n"
+    "mov v22.16b, v21.16b\n"
+    "fmla v6.4s, v23.4s, v11.4s\n"
+    "fmla v2.4s, v23.4s, v13.4s\n"
+    "fmla v5.4s, v23.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v14.4s\n"
+    "fmla v4.4s, v23.4s, v17.4s\n"
+    "fmla v22.4s, v23.4s, v20.4s\n"
+    "ldr s27, [x26, x28]\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "fmla v0.4s, v29.4s, v10.4s\n"
+    "mov v23.16b, v21.16b\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "mov v25.16b, v21.16b\n"
+    "mov v24.16b, v21.16b\n"
+    "fmla v15.4s, v26.4s, v9.4s\n"
+    "fmla v0.4s, v26.4s, v12.4s\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "fmla v18.4s, v26.4s, v17.4s\n"
+    "fmla v3.4s, v27.4s, v8.4s\n"
+    "ldr s29, [x24, x23]\n"
+    "fmla v15.4s, v27.4s, v11.4s\n"
+    "fmla v2.4s, v27.4s, v9.4s\n"
+    "fmla v0.4s, v27.4s, v13.4s\n"
+    "fmla v16.4s, v27.4s, v12.4s\n"
+    "fmla v1.4s, v27.4s, v10.4s\n"
+    "fmla v18.4s, v27.4s, v14.4s\n"
+    "fmla v19.4s, v27.4s, v17.4s\n"
+    "fmla v23.4s, v27.4s, v20.4s\n"
+    "fmla v6.4s, v29.4s, v8.4s\n"
+    "ldr s28, [x9, x12]\n"
+    "fmla v2.4s, v29.4s, v11.4s\n"
+    "fmla v5.4s, v29.4s, v9.4s\n"
+    "fmla v16.4s, v29.4s, v13.4s\n"
+    "fmla v1.4s, v29.4s, v12.4s\n"
+    "fmla v4.4s, v29.4s, v10.4s\n"
+    "fmla v19.4s, v29.4s, v14.4s\n"
+    "fmla v22.4s, v29.4s, v17.4s\n"
+    "fmla v25.4s, v29.4s, v20.4s\n"
+    "fmla v5.4s, v28.4s, v11.4s\n"
+    "ldr s21, [%[inptr0], x14]\n"
+    "fmla v1.4s, v28.4s, v13.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v22.4s, v28.4s, v14.4s\n"
+    "ldr s26, [x27, %[input_col_stride1]]\n"
+    "fmla v0.4s, v26.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x8]\n"
+    "fmla v4.4s, v21.4s, v13.4s\n"
+    "ldr s21, [x10, x28]\n"
+    "fmla v18.4s, v26.4s, v10.4s\n"
+    "ldr s29, [x26, x23]\n"
+    "fmla v15.4s, v21.4s, v8.4s\n"
+    "prfm pldl1keep, [%[inptr0], x25]\n"
+    "fmla v0.4s, v21.4s, v11.4s\n"
+    "fmla v16.4s, v21.4s, v9.4s\n"
+    "fmla v18.4s, v21.4s, v12.4s\n"
+    "fmla v19.4s, v21.4s, v10.4s\n"
+    "fmla v23.4s, v21.4s, v17.4s\n"
+    "ldr s21, [x24, x12]\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "fmla v16.4s, v29.4s, v11.4s\n"
+    "fmla v1.4s, v29.4s, v9.4s\n"
+    "fmla v18.4s, v29.4s, v13.4s\n"
+    "fmla v19.4s, v29.4s, v12.4s\n"
+    "fmla v22.4s, v29.4s, v10.4s\n"
+    "fmla v23.4s, v29.4s, v14.4s\n"
+    "fmla v25.4s, v29.4s, v17.4s\n"
+    "fmla v24.4s, v29.4s, v20.4s\n"
+    "ldr s28, [x9, x14]\n"
+    "fmla v5.4s, v21.4s, v8.4s\n"
+    "ldr s27, [x27, x28]\n"
+    "fmla v1.4s, v21.4s, v11.4s\n"
+    "add x9, x9, #4\n"
+    "fmla v4.4s, v21.4s, v9.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "fmla v19.4s, v21.4s, v13.4s\n"
+    "prfm pldl1keep, [x9, x8]\n"
+    "fmla v22.4s, v21.4s, v12.4s\n"
+    "fmla v25.4s, v21.4s, v14.4s\n"
+    "fmla v4.4s, v28.4s, v11.4s\n"
+    "ldr s20, [x10, x23]\n"
+    "fmla v0.4s, v27.4s, v8.4s\n"
+    "fmla v18.4s, v27.4s, v9.4s\n"
+    "fmla v22.4s, v28.4s, v13.4s\n"
+    "ldr s26, [x26, x12]\n"
+    "fmla v23.4s, v27.4s, v10.4s\n"
+    "ldr s21, [x24, x14]\n"
+    "fmla v16.4s, v20.4s, v8.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v18.4s, v20.4s, v11.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v19.4s, v20.4s, v9.4s\n"
+    "prfm pldl1keep, [x24, x8]\n"
+    "fmla v23.4s, v20.4s, v12.4s\n"
+    "fmla v25.4s, v20.4s, v10.4s\n"
+    "fmla v24.4s, v20.4s, v17.4s\n"
+    "ldr s28, [x27, x23]\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "ldr s20, [x10, x12]\n"
+    "fmla v19.4s, v26.4s, v11.4s\n"
+    "fmla v22.4s, v26.4s, v9.4s\n"
+    "fmla v23.4s, v26.4s, v13.4s\n"
+    "fmla v25.4s, v26.4s, v12.4s\n"
+    "fmla v24.4s, v26.4s, v14.4s\n"
+    "ldr s17, [x26, x14]\n"
+    "fmla v4.4s, v21.4s, v8.4s\n"
+    "ldr s26, [x27, x12]\n"
+    "fmla v22.4s, v21.4s, v11.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v25.4s, v21.4s, v13.4s\n"
+    "ldr s27, [x10, x14]\n"
+    "fmla v18.4s, v28.4s, v8.4s\n"
+    "prfm pldl1keep, [x26, #64]\n"
+    "fmla v23.4s, v28.4s, v9.4s\n"
+    "add x10, x10, #4\n"
+    "fmla v24.4s, v28.4s, v10.4s\n"
+    "ldr s28, [x27, x14]\n"
+    "fmla v19.4s, v20.4s, v8.4s\n"
+    "ldr s21, [%[wbptr]]\n"
+    "fmla v23.4s, v20.4s, v11.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v25.4s, v20.4s, v9.4s\n"
+    "fmla v24.4s, v20.4s, v12.4s\n"
+    "fmla v22.4s, v17.4s, v8.4s\n"
+    "ldr s20, [%[wbptr], #4]\n"
+    "fmla v23.4s, v26.4s, v8.4s\n"
+    "ldr s14, [%[wbptr], #8]\n"
+    "fmla v24.4s, v17.4s, v13.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v25.4s, v17.4s, v11.4s\n"
+    "ldr s17, [%[wbptr], #16]\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "ldr s13, [%[wbptr], #12]\n"
+    "str s7, [%[outptr0]]\n"
+    "fmla v25.4s, v27.4s, v8.4s\n"
+    "str s6, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "ldr s12, [%[wbptr], #20]\n"
+    "str s5, [%[outptr0], x19]\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "ldr s10, [%[wbptr], #28]\n"
+    "str s4, [%[outptr0], x20]\n"
+    "fmla v24.4s, v28.4s, v8.4s\n"
+    "str s3, [x16]\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "ldr s11, [%[wbptr], #24]\n"
+    "str s2, [x16, %[output_col_stride1]]\n"
+    "fmax v22.4s, v22.4s, v29.4s\n"
+    "str s1, [x16, x19]\n"
+    "fmax v15.4s, v15.4s, v29.4s\n"
+    "str s22, [x16, x20]\n"
+    "fmax v16.4s, v16.4s, v29.4s\n"
+    "str s15, [x17]\n"
+    "fmax v19.4s, v19.4s, v29.4s\n"
+    "str s16, [x17, %[output_col_stride1]]\n"
+    "fmax v25.4s, v25.4s, v29.4s\n"
+    "str s19, [x17, x19]\n"
+    "fmax v0.4s, v0.4s, v29.4s\n"
+    "str s25, [x17, x20]\n"
+    "fmax v18.4s, v18.4s, v29.4s\n"
+    "str s0, [x18]\n"
+    "fmax v23.4s, v23.4s, v29.4s\n"
+    "str s18, [x18, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v29.4s\n"
+    "str s23, [x18, x19]\n"
+    "mov v7.16b, v21.16b\n"
+    "str s24, [x18, x20]\n"
+    "mov v3.16b, v21.16b\n"
+    "mov v6.16b, v21.16b\n"
+    "ldr s9, [%[wbptr], #32]\n"
+    "mov v15.16b, v21.16b\n"
+    "ldr s8, [%[wbptr], #36]\n"
+    "mov v2.16b, v21.16b\n"
+    "ldr s22, [%[inptr0]]\n"
+    "mov v5.16b, v21.16b\n"
+    "ldr s19, [x9]\n"
+    "mov v0.16b, v21.16b\n"
+    "ldr s23, [%[inptr0], %[input_col_stride1]]\n"
+    "mov v16.16b, v21.16b\n"
+    "ldr s18, [x24]\n"
+    "mov v1.16b, v21.16b\n"
+    "ldr s27, [x9, %[input_col_stride1]]\n"
+    "mov v4.16b, v21.16b\n"
+    "ldr s28, [%[inptr0], x28]\n"
+    "fmla v7.4s, v22.4s, v20.4s\n"
+    "ldr s25, [x26]\n"
+    "fmla v3.4s, v19.4s, v20.4s\n"
+    "ldr s22, [x24, %[input_col_stride1]]\n"
+    "fmla v6.4s, v23.4s, v20.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmla v7.4s, v19.4s, v17.4s\n"
+    "add x16, x16, #4\n"
+    "fmla v3.4s, v18.4s, v17.4s\n"
+    "add x17, x17, #4\n"
+    "fmla v15.4s, v18.4s, v20.4s\n"
+    "add x18, x18, #4\n"
+    "fmla v7.4s, v23.4s, v14.4s\n"
+    "fmla v3.4s, v27.4s, v14.4s\n"
+    "fmla v7.4s, v18.4s, v10.4s\n"
+    "fmla v7.4s, v27.4s, v12.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "mov v18.16b, v21.16b\n"
+    "ldr s23, [x9, x28]\n"
+    "mov v19.16b, v21.16b\n"
+    "prfm pldl1keep, [x9, x25]\n"
+    "fmla v6.4s, v27.4s, v17.4s\n"
+    "prfm pldl1keep, [%[inptr0], x11]\n"
+    "fmla v2.4s, v27.4s, v20.4s\n"
+    "ldr s24, [%[inptr0], x23]\n"
+    "fmla v7.4s, v28.4s, v13.4s\n"
+    "prfm pldl1keep, [x10, #64]\n"
+    "fmla v6.4s, v28.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x8]\n"
+    "fmla v5.4s, v28.4s, v20.4s\n"
+    "ldr s26, [x10]\n"
+    "fmla v3.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x25]\n"
+    "fmla v15.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x9, x11]\n"
+    "fmla v0.4s, v25.4s, v20.4s\n"
+    "ldr s25, [x26, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [%[inptr0], x13]\n"
+    "fmla v3.4s, v22.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, #64]\n"
+    "fmla v6.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [x10, x8]\n"
+    "fmla v15.4s, v22.4s, v14.4s\n"
+    "prfm pldl1keep, [x26, x25]\n"
+    "fmla v2.4s, v22.4s, v17.4s\n"
+    "prfm pldl1keep, [x24, x11]\n"
+    "fmla v16.4s, v22.4s, v20.4s\n"
+    "ldr s22, [x24, x28]\n"
+    "fmla v7.4s, v23.4s, v11.4s\n"
+    "prfm pldl1keep, [x9, x13]\n"
+    "fmla v3.4s, v23.4s, v13.4s\n"
+    "prfm pldl1keep, [%[inptr0], x15]\n"
+    "fmla v6.4s, v23.4s, v12.4s\n"
+    "prfm pldl1keep, [x27, x8]\n"
+    "fmla v2.4s, v23.4s, v14.4s\n"
+    "prfm pldl1keep, [x10, x25]\n"
+    "fmla v5.4s, v23.4s, v17.4s\n"
+    "prfm pldl1keep, [x26, x11]\n"
+    "fmla v1.4s, v23.4s, v20.4s\n"
+    "ldr s23, [x9, x23]\n"
+    "fmla v6.4s, v24.4s, v13.4s\n"
+    "prfm pldl1keep, [x24, x13]\n"
+    "fmla v5.4s, v24.4s, v14.4s\n"
+    "prfm pldl1keep, [x9, x15]\n"
+    "fmla v4.4s, v24.4s, v20.4s\n"
+    "ldr s24, [%[inptr0], x12]\n"
+    "fmla v15.4s, v26.4s, v10.4s\n"
+    "prfm pldl1keep, [x27, x25]\n"
+    "fmla v0.4s, v26.4s, v17.4s\n"
+    "ldr s29, [x27]\n"
+    "fmla v3.4s, v25.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x11]\n"
+    "fmla v15.4s, v25.4s, v12.4s\n"
+    "prfm pldl1keep, [x26, x13]\n"
+    "fmla v2.4s, v25.4s, v10.4s\n"
+    "prfm pldl1keep, [x24, x15]\n"
+    "fmla v0.4s, v25.4s, v14.4s\n"
+    "prfm pldl1keep, [x27, x11]\n"
+    "fmla v16.4s, v25.4s, v17.4s\n"
+    "prfm pldl1keep, [x10, x13]\n"
+    "fmla v18.4s, v25.4s, v20.4s\n"
+    "ldr s26, [x10, %[input_col_stride1]]\n"
+    "fmla v7.4s, v22.4s, v8.4s\n"
+    "prfm pldl1keep, [x26, x15]\n"
+    "fmla v3.4s, v22.4s, v11.4s\n"
+    "prfm pldl1keep, [x27, x13]\n"
+    "fmla v6.4s, v22.4s, v9.4s\n"
+    "prfm pldl1keep, [x10, x15]\n"
+    "fmla v15.4s, v22.4s, v13.4s\n"
+    "prfm pldl1keep, [x27, x15]\n"
+    "fmla v2.4s, v22.4s, v12.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v5.4s, v22.4s, v10.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v16.4s, v22.4s, v14.4s\n"
+    "fmla v1.4s, v22.4s, v17.4s\n"
+    "fmla v19.4s, v22.4s, v20.4s\n"
+    "ldr s27, [x26, x28]\n"
+    "fmla v6.4s, v23.4s, v11.4s\n"
+    "fmla v2.4s, v23.4s, v13.4s\n"
+    "fmla v5.4s, v23.4s, v12.4s\n"
+    "fmla v1.4s, v23.4s, v14.4s\n"
+    "fmla v4.4s, v23.4s, v17.4s\n"
+    "fmla v0.4s, v29.4s, v10.4s\n"
+    "mov v22.16b, v21.16b\n"
+    "fmla v15.4s, v26.4s, v9.4s\n"
+    "fmla v5.4s, v24.4s, v13.4s\n"
+    "fmla v16.4s, v26.4s, v10.4s\n"
+    "fmla v22.4s, v23.4s, v20.4s\n"
+    "ldr s29, [x24, x23]\n"
+    "fmla v4.4s, v24.4s, v14.4s\n"
+    "ldr s28, [x9, x12]\n"
+    "fmla v0.4s, v26.4s, v12.4s\n"
+    "fmla v18.4s, v26.4s, v17.4s\n"
+    "mov v23.16b, v21.16b\n"
+    "fmla v3.4s, v27.4s, v8.4s\n"
+    "fmla v15.4s, v27.4s, v11.4s\n"
+    "fmla v2.4s, v27.4s, v9.4s\n"
+    "fmla v0.4s, v27.4s, v13.4s\n"
+    "fmla v16.4s, v27.4s, v12.4s\n"
+    "fmla v1.4s, v27.4s, v10.4s\n"
+    "fmla v18.4s, v27.4s, v14.4s\n"
+    "fmla v19.4s, v27.4s, v17.4s\n"
+    "fmla v23.4s, v27.4s, v20.4s\n"
+    "mov v25.16b, v21.16b\n"
+    "mov v24.16b, v21.16b\n"
+    "fmla v6.4s, v29.4s, v8.4s\n"
+    "fmla v2.4s, v29.4s, v11.4s\n"
+    "fmla v5.4s, v29.4s, v9.4s\n"
+    "fmla v16.4s, v29.4s, v13.4s\n"
+    "fmla v1.4s, v29.4s, v12.4s\n"
+    "fmla v4.4s, v29.4s, v10.4s\n"
+    "fmla v19.4s, v29.4s, v14.4s\n"
+    "fmla v22.4s, v29.4s, v17.4s\n"
+    "fmla v25.4s, v29.4s, v20.4s\n"
+    "ldr s21, [%[inptr0], x14]\n"
+    "fmla v5.4s, v28.4s, v11.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v1.4s, v28.4s, v13.4s\n"
+    "fmla v4.4s, v28.4s, v12.4s\n"
+    "fmla v22.4s, v28.4s, v14.4s\n"
+    "ldr s26, [x27, %[input_col_stride1]]\n"
+    "fmla v0.4s, v26.4s, v9.4s\n"
+    "fmla v18.4s, v26.4s, v10.4s\n"
+    "fmla v4.4s, v21.4s, v13.4s\n"
+    "ldr s21, [x10, x28]\n"
+    "fmla v15.4s, v21.4s, v8.4s\n"
+    "ldr s29, [x26, x23]\n"
+    "fmla v0.4s, v21.4s, v11.4s\n"
+    "fmla v16.4s, v21.4s, v9.4s\n"
+    "fmla v18.4s, v21.4s, v12.4s\n"
+    "fmla v19.4s, v21.4s, v10.4s\n"
+    "fmla v23.4s, v21.4s, v17.4s\n"
+    "ldr s21, [x24, x12]\n"
+    "fmla v2.4s, v29.4s, v8.4s\n"
+    "fmla v16.4s, v29.4s, v11.4s\n"
+    "fmla v1.4s, v29.4s, v9.4s\n"
+    "fmla v18.4s, v29.4s, v13.4s\n"
+    "fmla v19.4s, v29.4s, v12.4s\n"
+    "fmla v22.4s, v29.4s, v10.4s\n"
+    "fmla v23.4s, v29.4s, v14.4s\n"
+    "fmla v25.4s, v29.4s, v17.4s\n"
+    "fmla v24.4s, v29.4s, v20.4s\n"
+    "ldr s28, [x9, x14]\n"
+    "fmla v5.4s, v21.4s, v8.4s\n"
+    "ldr s27, [x27, x28]\n"
+    "fmla v1.4s, v21.4s, v11.4s\n"
+    "add x9, x9, #4\n"
+    "fmla v4.4s, v21.4s, v9.4s\n"
+    "fmla v19.4s, v21.4s, v13.4s\n"
+    "fmla v22.4s, v21.4s, v12.4s\n"
+    "fmla v25.4s, v21.4s, v14.4s\n"
+    "fmla v0.4s, v27.4s, v8.4s\n"
+    "ldr s20, [x10, x23]\n"
+    "fmla v4.4s, v28.4s, v11.4s\n"
+    "fmla v18.4s, v27.4s, v9.4s\n"
+    "fmla v22.4s, v28.4s, v13.4s\n"
+    "ldr s26, [x26, x12]\n"
+    "fmla v23.4s, v27.4s, v10.4s\n"
+    "ldr s21, [x24, x14]\n"
+    "fmla v16.4s, v20.4s, v8.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v18.4s, v20.4s, v11.4s\n"
+    "fmla v19.4s, v20.4s, v9.4s\n"
+    "fmla v23.4s, v20.4s, v12.4s\n"
+    "fmla v25.4s, v20.4s, v10.4s\n"
+    "fmla v24.4s, v20.4s, v17.4s\n"
+    "ldr s28, [x27, x23]\n"
+    "fmla v1.4s, v26.4s, v8.4s\n"
+    "ldr s20, [x10, x12]\n"
+    "fmla v19.4s, v26.4s, v11.4s\n"
+    "fmla v22.4s, v26.4s, v9.4s\n"
+    "fmla v23.4s, v26.4s, v13.4s\n"
+    "fmla v25.4s, v26.4s, v12.4s\n"
+    "fmla v24.4s, v26.4s, v14.4s\n"
+    "ldr s17, [x26, x14]\n"
+    "fmla v4.4s, v21.4s, v8.4s\n"
+    "ldr s26, [x27, x12]\n"
+    "fmla v22.4s, v21.4s, v11.4s\n"
+    "add x26, x26, #4\n"
+    "fmla v25.4s, v21.4s, v13.4s\n"
+    "ldr s27, [x10, x14]\n"
+    "fmla v18.4s, v28.4s, v8.4s\n"
+    "add x10, x10, #4\n"
+    "fmla v23.4s, v28.4s, v9.4s\n"
+    "fmla v24.4s, v28.4s, v10.4s\n"
+    "fmla v19.4s, v20.4s, v8.4s\n"
+    "ldr s28, [x27, x14]\n"
+    "fmla v25.4s, v20.4s, v9.4s\n"
+    "add x27, x27, #4\n"
+    "fmla v23.4s, v20.4s, v11.4s\n"
+    "fmla v24.4s, v20.4s, v12.4s\n"
+    "fmla v22.4s, v17.4s, v8.4s\n"
+    "movi v29.16b, #0\n"
+    "fmla v25.4s, v17.4s, v11.4s\n"
+    "fmla v24.4s, v17.4s, v13.4s\n"
+    "fmla v23.4s, v26.4s, v8.4s\n"
+    "fmax v7.4s, v7.4s, v29.4s\n"
+    "fmla v25.4s, v27.4s, v8.4s\n"
+    "fmax v6.4s, v6.4s, v29.4s\n"
+    "str s7, [%[outptr0]]\n"
+    "fmla v24.4s, v26.4s, v9.4s\n"
+    "str s6, [%[outptr0], %[output_col_stride1]]\n"
+    "fmax v5.4s, v5.4s, v29.4s\n"
+    "fmax v4.4s, v4.4s, v29.4s\n"
+    "fmax v3.4s, v3.4s, v29.4s\n"
+    "str s5, [%[outptr0], x19]\n"
+    "fmla v24.4s, v27.4s, v11.4s\n"
+    "str s4, [%[outptr0], x20]\n"
+    "fmax v2.4s, v2.4s, v29.4s\n"
+    "str s3, [x16]\n"
+    "fmax v1.4s, v1.4s, v29.4s\n"
+    "str s2, [x16, %[output_col_stride1]]\n"
+    "fmla v24.4s, v28.4s, v8.4s\n"
+    "str s1, [x16, x19]\n"
+    "fmax v22.4s, v22.4s, v29.4s\n"
+    "fmax v15.4s, v15.4s, v29.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "str s22, [x16, x20]\n"
+    "fmax v16.4s, v16.4s, v29.4s\n"
+    "str s15, [x17]\n"
+    "fmax v19.4s, v19.4s, v29.4s\n"
+    "str s16, [x17, %[output_col_stride1]]\n"
+    "fmax v25.4s, v25.4s, v29.4s\n"
+    "str s19, [x17, x19]\n"
+    "fmax v0.4s, v0.4s, v29.4s\n"
+    "str s25, [x17, x20]\n"
+    "fmax v18.4s, v18.4s, v29.4s\n"
+    "str s0, [x18]\n"
+    "fmax v23.4s, v23.4s, v29.4s\n"
+    "str s18, [x18, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v29.4s\n"
+    "str s23, [x18, x19]\n"
+    "add x16, x16, #4\n"
+    "str s24, [x18, x20]\n"
+    "add x17, x17, #4\n"
+    "add x18, x18, #4\n"
+    "7:\n"
+    : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x8", "x9", "memory"
+  );
+}
 
-    asm volatile (
-      "qW22 .req q0\n" "vW22 .req v0\n"
-      "qU64 .req q1\n" "qU35 .req q1\n" "qV41 .req q1\n"
-      "vU64 .req v1\n" "vU35 .req v1\n" "vV41 .req v1\n"
-      "qU34 .req q2\n" "qU21 .req q2\n" "qV43 .req q2\n"
-      "vU34 .req v2\n" "vU21 .req v2\n" "vV43 .req v2\n"
-      "qW21 .req q3\n" "vW21 .req v3\n"
-      "qU24 .req q4\n" "qU54 .req q4\n" "qV31 .req q4\n"
-      "vU24 .req v4\n" "vU54 .req v4\n" "vV31 .req v4\n"
-      "qV12 .req q5\n" "qU61 .req q5\n" "vV12 .req v5\n" "vU61 .req v5\n"
-      "qU26 .req q6\n" "qV32 .req q6\n" "vU26 .req v6\n" "vV32 .req v6\n"
-      "qU36 .req q7\n" "qU51 .req q7\n" "qU66 .req q7\n" "qU12 .req q7\n"
-      "vU36 .req v7\n" "vU51 .req v7\n" "vU66 .req v7\n" "vU12 .req v7\n"
-      "qV14 .req q8\n" "qV11 .req q8\n" "qU65 .req q8\n"
-      "vV14 .req v8\n" "vV11 .req v8\n" "vU65 .req v8\n"
-      "qU15 .req q9\n" "qU22 .req q9\n" "qU45 .req q9\n"
-      "vU15 .req v9\n" "vU22 .req v9\n" "vU45 .req v9\n"
-      "qV22 .req q10\n" "qU14 .req q10\n" "vV22 .req v10\n" "vU14 .req v10\n"
-      "qU44 .req q11\n" "qU43 .req q11\n" "qU11 .req q11\n"
-      "vU44 .req v11\n" "vU43 .req v11\n" "vU11 .req v11\n"
-      "qV24 .req q12\n" "qV42 .req q12\n" "vV24 .req v12\n" "vV42 .req v12\n"
-      "qW31 .req q13\n" "vW31 .req v13\n" "qW13 .req q14\n" "vW13 .req v14\n"
-      "qU33 .req q15\n" "qU62 .req q15\n" "qU25 .req q15\n" "qU56 .req q15\n"
-      "vU33 .req v15\n" "vU62 .req v15\n" "vU25 .req v15\n" "vU56 .req v15\n"
-      "qW33 .req q16\n" "vW33 .req v16\n"
-      "qU42 .req q17\n" "qU16 .req q17\n" "qV44 .req q17\n"
-      "vU42 .req v17\n" "vU16 .req v17\n" "vV44 .req v17\n"
-      "qU63 .req q18\n" "qU31 .req q18\n" "qV34 .req q18\n"
-      "vU63 .req v18\n" "vU31 .req v18\n" "vV34 .req v18\n"
-      "qW11 .req q19\n" "vW11 .req v19\n" "qU41 .req q20\n" "qV13 .req q20\n"
-      "vU41 .req v20\n" "vV13 .req v20\n" "qV33 .req q21\n" "vV33 .req v21\n"
-      "qU46 .req q22\n" "qU32 .req q22\n" "qU13 .req q22\n"
-      "vU46 .req v22\n" "vU32 .req v22\n" "vU13 .req v22\n" "qW23 .req q23\n"
-      "vW23 .req v23\n" "qV23 .req q24\n" "vV23 .req v24\n"
-      "qV21 .req q25\n" "qU55 .req q25\n" "vV21 .req v25\n" "vU55 .req v25\n"
-      "qW12 .req q26\n" "vW12 .req v26\n" "qW32 .req q27\n" "vW32 .req v27\n"
-      "qU23 .req q28\n" "qU52 .req q28\n"
-      "vU23 .req v28\n" "vU52 .req v28\n" "qU53 .req q29\n" "vU53 .req v29\n"
-
-      "uptr1 .req x0\n"
-      "uptr2 .req x1\n"
-      "uptr3 .req x2\n"
-      "uptr4 .req x3\n"
-      "uptr5 .req x4\n"
-
-      "vptr1 .req x5\n"
-      "vptr2 .req x6\n"
-      "vptr3 .req x7\n"
-
-      "wptr1 .req x8\n"
-      "wptr2 .req x9\n"
-
-      // Prepare pointers and strides
-      "add uptr1, %x[uptr0], %x[u_row_stride]\n"
-      "add uptr2,    uptr1 , %x[u_row_stride]\n"
-      "add uptr3,    uptr2 , %x[u_row_stride]\n"
-      "add uptr4,    uptr3 , %x[u_row_stride]\n"
-      "add uptr5,    uptr4 , %x[u_row_stride]\n"
-
-      "add vptr1, %x[vptr0], %x[v_row_stride]\n"
-      "add vptr2,    vptr1 , %x[v_row_stride]\n"
-      "add vptr3,    vptr2 , %x[v_row_stride]\n"
-
-      "add wptr1, %x[wptr0], %x[w_row_stride]\n"
-      "add wptr2,    wptr1 , %x[w_row_stride]\n"
-
-      // Load initial operands
-      "ldr qU16, [%x[uptr0], %x[uvw_col_stride5]]\n"
-      "ldr qW13, [%x[wptr0], %x[uvw_col_stride2]]\n"
-      "subs %x[c4_rem], %x[c4_rem], #1\n"
-      "ldr qU15, [%x[uptr0], %x[uvw_col_stride4]]\n"
-      "ldr qW23, [wptr1, %x[uvw_col_stride2]]\n"
-      "ldr qU14, [%x[uptr0], %x[uvw_col_stride3]]\n"
-      "ldr qW33, [wptr2, %x[uvw_col_stride2]]\n"
-      "ldr qU26, [uptr1, %x[uvw_col_stride5]]\n"
-      "ldr qW12, [%x[wptr0], %x[uvw_col_stride1]]\n"
-      "ldr qU25, [uptr1, %x[uvw_col_stride4]]\n"
-      "ldr qW22, [wptr1, %x[uvw_col_stride1]]\n"
-      "ldr qU36, [uptr2, %x[uvw_col_stride5]]\n"
-      "ldr qW32, [wptr2, %x[uvw_col_stride1]]\n"
-      "ldr qW11, [%x[wptr0]], #0x10\n"
-      "fmul vV14.4s, vU16.4s, vW13.4s\n"
-      "ldr qU24, [uptr1, %x[uvw_col_stride3]]\n"
-      "fmul vV13.4s, vU15.4s, vW13.4s\n"
-      "ldr qW31, [wptr2], #0x10\n"
-      "fmla vV14.4s, vU15.4s, vW12.4s\n"
-      "ldr qW21, [wptr1], #0x10\n"
-      "fmul vV12.4s, vU14.4s, vW13.4s\n"
-      "ldr qU34, [uptr2, %x[uvw_col_stride3]]\n"
-      "fmla vV13.4s, vU14.4s, vW12.4s\n"
-      "ldr qU46, [uptr3, %x[uvw_col_stride5]]\n"
-      "fmla vV14.4s, vU14.4s, vW11.4s\n"
-      "ldr qU45, [uptr3, %x[uvw_col_stride4]]\n"
-      "fmla vV14.4s, vU26.4s, vW23.4s\n"
-      "ldr qU35, [uptr2, %x[uvw_col_stride4]]\n"
-      "fmul vV24.4s, vU26.4s, vW13.4s\n"
-      "ldr qU44, [uptr3, %x[uvw_col_stride3]]\n"
-      "fmla vV13.4s, vU25.4s, vW23.4s\n"
-      "beq 2f\n"  // Single iteration only
-
-      "1:"  // Loop body
-        "fmla vV14.4s, vU25.4s, vW22.4s\n"
-        "prfm pldl1keep, [%x[wptr0], %[prftch]]\n"
-        "fmul vV23.4s, vU25.4s, vW13.4s\n"
-        "prfm pldl1keep, [%x[wptr0], %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV24.4s, vU25.4s, vW12.4s\n"
-        "ldr qU56, [uptr4, %x[uvw_col_stride5]]\n"
-        "fmla vV12.4s, vU24.4s, vW23.4s\n"
-        "prfm pldl1keep, [%x[wptr0], %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV13.4s, vU24.4s, vW22.4s\n"
-        "prfm pldl1keep, [   wptr1 , %[prftch]]\n"
-        "fmla vV14.4s, vU24.4s, vW21.4s\n"
-        "prfm pldl1keep, [   wptr1 , %x[prftch_uvw_col_stride1]]\n"
-        "fmul vV22.4s, vU24.4s, vW13.4s\n"
-        "prfm pldl1keep, [   wptr1 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV23.4s, vU24.4s, vW12.4s\n"
-        "prfm pldl1keep, [   wptr2 , %x[prftch]]\n"
-        "fmla vV24.4s, vU24.4s, vW11.4s\n"
-        "ldr qU55, [uptr4, %x[uvw_col_stride4]]\n"
-        "fmla vV14.4s, vU36.4s, vW33.4s\n"
-        "prfm pldl1keep, [   wptr2 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV24.4s, vU36.4s, vW23.4s\n"
-        "prfm pldl1keep, [   wptr2 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmul vV34.4s, vU36.4s, vW13.4s\n"
-        "ldr qU54, [uptr4, %x[uvw_col_stride3]]\n"
-        "fmla vV13.4s, vU35.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr2 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV14.4s, vU35.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr2 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV23.4s, vU35.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr2 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV24.4s, vU35.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr2 , %x[prftch_uvw_col_stride4] ]\n"
-        "fmul vV33.4s, vU35.4s, vW13.4s\n"
-        "prfm pldl1keep, [   uptr2 , %x[prftch_uvw_col_stride5] ]\n"
-        "fmla vV34.4s, vU35.4s, vW12.4s\n"
-        "ldr qU66, [uptr5, %x[uvw_col_stride5]]\n"
-        "fmla vV12.4s, vU34.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr3 , %[prftch]]\n"
-        "fmla vV13.4s, vU34.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr3 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV14.4s, vU34.4s, vW31.4s\n"
-        "str qV14, [%x[vptr0], %x[uvw_col_stride3]]\n"
-        "fmla vV22.4s, vU34.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr3 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV23.4s, vU34.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr3 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV24.4s, vU34.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr3 , %x[prftch_uvw_col_stride4] ]\n"
-        "fmul vV32.4s, vU34.4s, vW13.4s\n"
-        "prfm pldl1keep, [   uptr3 , %x[prftch_uvw_col_stride5] ]\n"
-        "fmla vV33.4s, vU34.4s, vW12.4s\n"
-        "prfm pldl1keep, [   uptr4 , %[prftch]]\n"
-        "fmla vV34.4s, vU34.4s, vW11.4s\n"
-        "ldr qU65, [uptr5, %x[uvw_col_stride4]]\n"
-        "fmla vV24.4s, vU46.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr4 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV34.4s, vU46.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr4 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmul vV44.4s, vU46.4s, vW13.4s\n"
-        "ldr qU64, [uptr5, %x[uvw_col_stride3]]\n"
-        "fmla vV23.4s, vU45.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr4 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV24.4s, vU45.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr4 , %x[prftch_uvw_col_stride4] ]\n"
-        "fmla vV33.4s, vU45.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr4 , %x[prftch_uvw_col_stride5] ]\n"
-        "fmla vV34.4s, vU45.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr5 , %[prftch]]\n"
-        "fmul vV43.4s, vU45.4s, vW13.4s\n"
-        "prfm pldl1keep, [   uptr5 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV44.4s, vU45.4s, vW12.4s\n"
-        "ldr qU13, [%x[uptr0], %x[uvw_col_stride2]]\n"
-        "fmla vV22.4s, vU44.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr5 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV23.4s, vU44.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr5 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV24.4s, vU44.4s, vW31.4s\n"
-        "str qV24, [vptr1, %x[uvw_col_stride3]]\n"
-        "fmla vV32.4s, vU44.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr5 , %x[prftch_uvw_col_stride4] ]\n"
-        "fmla vV33.4s, vU44.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr5 , %x[prftch_uvw_col_stride5] ]\n"
-        "fmla vV34.4s, vU44.4s, vW21.4s\n"
-        "prfm pstl1keep, [%x[vptr0], %[prftch]]\n"
-        "fmul vV42.4s, vU44.4s, vW13.4s\n"
-        "prfm pstl1keep, [%x[vptr0], %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV43.4s, vU44.4s, vW12.4s\n"
-        "prfm pstl1keep, [%x[vptr0], %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV44.4s, vU44.4s, vW11.4s\n"
-        "ldr qU23, [uptr1, %x[uvw_col_stride2]]\n"
-        "fmla vV34.4s, vU56.4s, vW33.4s\n"
-        "prfm pstl1keep, [%x[vptr0], %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV44.4s, vU56.4s, vW23.4s\n"
-        "ldr qU33, [uptr2, %x[uvw_col_stride2]]\n"
-        "fmla vV33.4s, vU55.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr1 , %[prftch]]\n"
-        "fmla vV34.4s, vU55.4s, vW32.4s\n"
-        "prfm pstl1keep, [   vptr1 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV43.4s, vU55.4s, vW23.4s\n"
-        "prfm pstl1keep, [   vptr1 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV44.4s, vU55.4s, vW22.4s\n"
-        "ldr qU43, [uptr3, %x[uvw_col_stride2]]\n"
-        "fmla vV32.4s, vU54.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr1 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV33.4s, vU54.4s, vW32.4s\n"
-        "prfm pstl1keep, [   vptr2 , %[prftch]]\n"
-        "fmla vV34.4s, vU54.4s, vW31.4s\n"
-        "str qV34, [vptr2, %x[uvw_col_stride3]]\n"
-        "fmla vV42.4s, vU54.4s, vW23.4s\n"
-        "prfm pstl1keep, [   vptr2 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV43.4s, vU54.4s, vW22.4s\n"
-        "prfm pstl1keep, [   vptr2 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV44.4s, vU54.4s, vW21.4s\n"
-        "ldr qU53, [uptr4, %x[uvw_col_stride2]]\n"
-        "fmla vV44.4s, vU66.4s, vW33.4s\n"
-        "ldr qU63, [uptr5, %x[uvw_col_stride2]]\n"
-        "fmla vV43.4s, vU65.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr2 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV44.4s, vU65.4s, vW32.4s\n"
-        "ldr qU12, [%x[uptr0], %x[uvw_col_stride1]]\n"
-        "fmla vV42.4s, vU64.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr3 , %[prftch]]\n"
-        "fmla vV43.4s, vU64.4s, vW32.4s\n"
-        "prfm pstl1keep, [   vptr3 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV44.4s, vU64.4s, vW31.4s\n"
-        "str qV44, [vptr3, %x[uvw_col_stride3]]\n"
-        "fmul vV11.4s, vU13.4s, vW13.4s\n"
-        "ldr qU22, [uptr1, %x[uvw_col_stride1]]\n"
-        "fmla vV12.4s, vU13.4s, vW12.4s\n"
-        "prfm pstl1keep, [   vptr3 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV13.4s, vU13.4s, vW11.4s\n"
-        "ldr qU32, [uptr2, %x[uvw_col_stride1]]\n"
-        "fmla vV11.4s, vU23.4s, vW23.4s\n"
-        "prfm pstl1keep, [   vptr3 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV12.4s, vU23.4s, vW22.4s\n"
-        "fmla vV13.4s, vU23.4s, vW21.4s\n"
-        "fmul vV21.4s, vU23.4s, vW13.4s\n"
-        "fmla vV22.4s, vU23.4s, vW12.4s\n"
-        "fmla vV23.4s, vU23.4s, vW11.4s\n"
-        "ldr qU42, [uptr3, %x[uvw_col_stride1]]\n"
-        "fmla vV11.4s, vU33.4s, vW33.4s\n"
-        "fmla vV12.4s, vU33.4s, vW32.4s\n"
-        "fmla vV13.4s, vU33.4s, vW31.4s\n"
-        "str qV13, [%x[vptr0], %x[uvw_col_stride2]]\n"
-        "fmla vV21.4s, vU33.4s, vW23.4s\n"
-        "fmla vV22.4s, vU33.4s, vW22.4s\n"
-        "fmla vV23.4s, vU33.4s, vW21.4s\n"
-        "fmul vV31.4s, vU33.4s, vW13.4s\n"
-        "fmla vV32.4s, vU33.4s, vW12.4s\n"
-        "fmla vV33.4s, vU33.4s, vW11.4s\n"
-        "ldr qU52, [uptr4, %x[uvw_col_stride1]]\n"
-        "fmla vV21.4s, vU43.4s, vW33.4s\n"
-        "fmla vV22.4s, vU43.4s, vW32.4s\n"
-        "fmla vV23.4s, vU43.4s, vW31.4s\n"
-        "str qV23, [vptr1, %x[uvw_col_stride2]]\n"
-        "fmla vV31.4s, vU43.4s, vW23.4s\n"
-        "fmla vV32.4s, vU43.4s, vW22.4s\n"
-        "fmla vV33.4s, vU43.4s, vW21.4s\n"
-        "fmul vV41.4s, vU43.4s, vW13.4s\n"
-        "ldr qW13, [%x[wptr0], %x[uvw_col_stride2]]\n"
-        "fmla vV42.4s, vU43.4s, vW12.4s\n"
-        "fmla vV43.4s, vU43.4s, vW11.4s\n"
-        "ldr qU62, [uptr5, %x[uvw_col_stride1]]\n"
-        "fmla vV31.4s, vU53.4s, vW33.4s\n"
-        "fmla vV32.4s, vU53.4s, vW32.4s\n"
-        "fmla vV33.4s, vU53.4s, vW31.4s\n"
-        "str qV33, [vptr2, %x[uvw_col_stride2]]\n"
-        "fmla vV41.4s, vU53.4s, vW23.4s\n"
-        "ldr qW23, [wptr1, %x[uvw_col_stride2]]\n"
-        "fmla vV42.4s, vU53.4s, vW22.4s\n"
-        "fmla vV43.4s, vU53.4s, vW21.4s\n"
-        "ldr qU11, [%x[uptr0]], #0x10\n"
-        "fmla vV41.4s, vU63.4s, vW33.4s\n"
-        "ldr qW33, [wptr2, %x[uvw_col_stride2]]\n"
-        "fmla vV42.4s, vU63.4s, vW32.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %[prftch]]\n"
-        "fmla vV43.4s, vU63.4s, vW31.4s\n"
-        "str qV43, [vptr3, %x[uvw_col_stride2]]\n"
-        "fmla vV11.4s, vU12.4s, vW12.4s\n"
-        "ldr qU21, [uptr1], #0x10\n"
-        "fmla vV12.4s, vU12.4s, vW11.4s\n"
-        "ldr qU31, [uptr2], #0x10\n"
-        "fmla vV11.4s, vU22.4s, vW22.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV12.4s, vU22.4s, vW21.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV21.4s, vU22.4s, vW12.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV22.4s, vU22.4s, vW11.4s\n"
-        "ldr qU41, [uptr3], #0x10\n"
-        "fmla vV11.4s, vU32.4s, vW32.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %x[prftch_uvw_col_stride4] ]\n"
-        "fmla vV12.4s, vU32.4s, vW31.4s\n"
-        "str qV12, [%x[vptr0], %x[uvw_col_stride1]]\n"
-        "fmla vV21.4s, vU32.4s, vW22.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %x[prftch_uvw_col_stride5] ]\n"
-        "fmla vV22.4s, vU32.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr1 , %[prftch]]\n"
-        "fmla vV31.4s, vU32.4s, vW12.4s\n"
-        "prfm pldl1keep, [   uptr1 , %x[prftch_uvw_col_stride1]]\n"
-        "fmla vV32.4s, vU32.4s, vW11.4s\n"
-        "ldr qU51, [uptr4], #0x10\n"
-        "fmla vV21.4s, vU42.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr1 , %x[prftch_uvw_col_stride2] ]\n"
-        "fmla vV22.4s, vU42.4s, vW31.4s\n"
-        "str qV22, [vptr1, %x[uvw_col_stride1]]\n"
-        "fmla vV31.4s, vU42.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr1 , %x[prftch_uvw_col_stride3] ]\n"
-        "fmla vV32.4s, vU42.4s, vW21.4s\n"
-        "subs %x[c4_rem], %x[c4_rem], #1\n"
-        "fmla vV41.4s, vU42.4s, vW12.4s\n"
-        "ldr qW12, [%x[wptr0], %x[uvw_col_stride1]]\n"
-        "fmla vV42.4s, vU42.4s, vW11.4s\n"
-        "ldr qU61, [uptr5], #0x10\n"
-        "fmla vV31.4s, vU52.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr1 , %x[prftch_uvw_col_stride4] ]\n"
-        "fmla vV32.4s, vU52.4s, vW31.4s\n"
-        "str qV32, [vptr2, %x[uvw_col_stride1]]\n"
-        "fmla vV41.4s, vU52.4s, vW22.4s\n"
-        "ldr qW22, [wptr1, %x[uvw_col_stride1]]\n"
-        "fmla vV42.4s, vU52.4s, vW21.4s\n"
-        "ldr qU16, [%x[uptr0], %x[uvw_col_stride5]]\n"
-        "fmla vV41.4s, vU62.4s, vW32.4s\n"
-        "ldr qW32, [wptr2, %x[uvw_col_stride1]]\n"
-        "fmla vV42.4s, vU62.4s, vW31.4s\n"
-        "str qV42, [vptr3, %x[uvw_col_stride1]]\n"
-        "fmla vV11.4s, vU11.4s, vW11.4s\n"
-        "ldr qU15, [%x[uptr0], %x[uvw_col_stride4]]\n"
-        "fmla vV11.4s, vU21.4s, vW21.4s\n"
-        "ldr qU14, [%x[uptr0], %x[uvw_col_stride3]]\n"
-        "fmla vV21.4s, vU21.4s, vW11.4s\n"
-        "ldr qU26, [uptr1, %x[uvw_col_stride5]]\n"
-        "fmla vV11.4s, vU31.4s, vW31.4s\n"
-        "str qV11, [%x[vptr0]], #0x10\n"
-        "fmla vV21.4s, vU31.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr1 , %x[prftch_uvw_col_stride5] ]\n"
-        "fmla vV31.4s, vU31.4s, vW11.4s\n"
-        "ldr qU25, [uptr1, %x[uvw_col_stride4]]\n"
-        "fmla vV21.4s, vU41.4s, vW31.4s\n"
-        "str qV21, [vptr1], #0x10\n"
-        "fmla vV31.4s, vU41.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr2 , %[prftch]]\n"
-        "fmla vV41.4s, vU41.4s, vW11.4s\n"
-        "ldr qW11, [%x[wptr0]], #0x10\n"
-        "fmla vV31.4s, vU51.4s, vW31.4s\n"
-        "str qV31, [vptr2], #0x10\n"
-        "fmla vV41.4s, vU51.4s, vW21.4s\n"
-        "ldr qU36, [uptr2, %x[uvw_col_stride5]]\n"
-        "fmla vV41.4s, vU61.4s, vW31.4s\n"
-        "str qV41, [vptr3], #0x10\n"
-        "fmul vV14.4s, vU16.4s, vW13.4s\n"
-        "ldr qU24, [uptr1, %x[uvw_col_stride3]]\n"
-        "fmul vV13.4s, vU15.4s, vW13.4s\n"
-        "ldr qW31, [wptr2], #0x10\n"
-        "fmla vV14.4s, vU15.4s, vW12.4s\n"
-        "ldr qW21, [wptr1], #0x10\n"
-        "fmul vV12.4s, vU14.4s, vW13.4s\n"
-        "ldr qU34, [uptr2, %x[uvw_col_stride3]]\n"
-        "fmla vV13.4s, vU14.4s, vW12.4s\n"
-        "ldr qU46, [uptr3, %x[uvw_col_stride5]]\n"
-        "fmla vV14.4s, vU14.4s, vW11.4s\n"
-        "ldr qU45, [uptr3, %x[uvw_col_stride4]]\n"
-        "fmla vV14.4s, vU26.4s, vW23.4s\n"
-        "ldr qU35, [uptr2, %x[uvw_col_stride4]]\n"
-        "fmul vV24.4s, vU26.4s, vW13.4s\n"
-        "ldr qU44, [uptr3, %x[uvw_col_stride3]]\n"
-        "fmla vV13.4s, vU25.4s, vW23.4s\n"
-        "bne 1b\n"
-
-      "2:"  // Final iteration
-        "fmla vV14.4s, vU25.4s, vW22.4s\n"
-        "fmul vV23.4s, vU25.4s, vW13.4s\n"
-        "fmla vV24.4s, vU25.4s, vW12.4s\n"
-        "ldr qU56, [uptr4, %x[uvw_col_stride5]]\n"
-        "fmla vV12.4s, vU24.4s, vW23.4s\n"
-        "fmla vV13.4s, vU24.4s, vW22.4s\n"
-        "fmla vV14.4s, vU24.4s, vW21.4s\n"
-        "fmul vV22.4s, vU24.4s, vW13.4s\n"
-        "fmla vV23.4s, vU24.4s, vW12.4s\n"
-        "fmla vV24.4s, vU24.4s, vW11.4s\n"
-        "ldr qU55, [uptr4, %x[uvw_col_stride4]]\n"
-        "fmla vV14.4s, vU36.4s, vW33.4s\n"
-        "fmla vV24.4s, vU36.4s, vW23.4s\n"
-        "fmul vV34.4s, vU36.4s, vW13.4s\n"
-        "ldr qU54, [uptr4, %x[uvw_col_stride3]]\n"
-        "fmla vV13.4s, vU35.4s, vW33.4s\n"
-        "fmla vV14.4s, vU35.4s, vW32.4s\n"
-        "fmla vV23.4s, vU35.4s, vW23.4s\n"
-        "fmla vV24.4s, vU35.4s, vW22.4s\n"
-        "fmul vV33.4s, vU35.4s, vW13.4s\n"
-        "fmla vV34.4s, vU35.4s, vW12.4s\n"
-        "ldr qU66, [uptr5, %x[uvw_col_stride5]]\n"
-        "fmla vV12.4s, vU34.4s, vW33.4s\n"
-        "fmla vV13.4s, vU34.4s, vW32.4s\n"
-        "fmla vV14.4s, vU34.4s, vW31.4s\n"
-        "str qV14, [%x[vptr0], %x[uvw_col_stride3]]\n"
-        "fmla vV22.4s, vU34.4s, vW23.4s\n"
-        "fmla vV23.4s, vU34.4s, vW22.4s\n"
-        "fmla vV24.4s, vU34.4s, vW21.4s\n"
-        "fmul vV32.4s, vU34.4s, vW13.4s\n"
-        "fmla vV33.4s, vU34.4s, vW12.4s\n"
-        "fmla vV34.4s, vU34.4s, vW11.4s\n"
-        "ldr qU65, [uptr5, %x[uvw_col_stride4]]\n"
-        "fmla vV24.4s, vU46.4s, vW33.4s\n"
-        "fmla vV34.4s, vU46.4s, vW23.4s\n"
-        "fmul vV44.4s, vU46.4s, vW13.4s\n"
-        "ldr qU64, [uptr5, %x[uvw_col_stride3]]\n"
-        "fmla vV23.4s, vU45.4s, vW33.4s\n"
-        "fmla vV24.4s, vU45.4s, vW32.4s\n"
-        "fmla vV33.4s, vU45.4s, vW23.4s\n"
-        "fmla vV34.4s, vU45.4s, vW22.4s\n"
-        "fmul vV43.4s, vU45.4s, vW13.4s\n"
-        "fmla vV44.4s, vU45.4s, vW12.4s\n"
-        "ldr qU13, [%x[uptr0], %x[uvw_col_stride2]]\n"
-        "fmla vV22.4s, vU44.4s, vW33.4s\n"
-        "fmla vV23.4s, vU44.4s, vW32.4s\n"
-        "fmla vV24.4s, vU44.4s, vW31.4s\n"
-        "str qV24, [vptr1, %x[uvw_col_stride3]]\n"
-        "fmla vV32.4s, vU44.4s, vW23.4s\n"
-        "fmla vV33.4s, vU44.4s, vW22.4s\n"
-        "fmla vV34.4s, vU44.4s, vW21.4s\n"
-        "fmul vV42.4s, vU44.4s, vW13.4s\n"
-        "fmla vV43.4s, vU44.4s, vW12.4s\n"
-        "fmla vV44.4s, vU44.4s, vW11.4s\n"
-        "ldr qU23, [uptr1, %x[uvw_col_stride2]]\n"
-        "fmla vV34.4s, vU56.4s, vW33.4s\n"
-        "fmla vV44.4s, vU56.4s, vW23.4s\n"
-        "ldr qU33, [uptr2, %x[uvw_col_stride2]]\n"
-        "fmla vV33.4s, vU55.4s, vW33.4s\n"
-        "fmla vV34.4s, vU55.4s, vW32.4s\n"
-        "fmla vV43.4s, vU55.4s, vW23.4s\n"
-        "fmla vV44.4s, vU55.4s, vW22.4s\n"
-        "ldr qU43, [uptr3, %x[uvw_col_stride2]]\n"
-        "fmla vV32.4s, vU54.4s, vW33.4s\n"
-        "fmla vV33.4s, vU54.4s, vW32.4s\n"
-        "fmla vV34.4s, vU54.4s, vW31.4s\n"
-        "str qV34, [vptr2, %x[uvw_col_stride3]]\n"
-        "fmla vV42.4s, vU54.4s, vW23.4s\n"
-        "fmla vV43.4s, vU54.4s, vW22.4s\n"
-        "fmla vV44.4s, vU54.4s, vW21.4s\n"
-        "ldr qU53, [uptr4, %x[uvw_col_stride2]]\n"
-        "fmla vV44.4s, vU66.4s, vW33.4s\n"
-        "ldr qU63, [uptr5, %x[uvw_col_stride2]]\n"
-        "fmla vV43.4s, vU65.4s, vW33.4s\n"
-        "fmla vV44.4s, vU65.4s, vW32.4s\n"
-        "ldr qU12, [%x[uptr0], %x[uvw_col_stride1]]\n"
-        "fmla vV42.4s, vU64.4s, vW33.4s\n"
-        "fmla vV43.4s, vU64.4s, vW32.4s\n"
-        "fmla vV44.4s, vU64.4s, vW31.4s\n"
-        "str qV44, [vptr3, %x[uvw_col_stride3]]\n"
-        "fmul vV11.4s, vU13.4s, vW13.4s\n"
-        "ldr qU22, [uptr1, %x[uvw_col_stride1]]\n"
-        "fmla vV12.4s, vU13.4s, vW12.4s\n"
-        "fmla vV13.4s, vU13.4s, vW11.4s\n"
-        "ldr qU32, [uptr2, %x[uvw_col_stride1]]\n"
-        "fmla vV11.4s, vU23.4s, vW23.4s\n"
-        "fmla vV12.4s, vU23.4s, vW22.4s\n"
-        "fmla vV13.4s, vU23.4s, vW21.4s\n"
-        "fmul vV21.4s, vU23.4s, vW13.4s\n"
-        "fmla vV22.4s, vU23.4s, vW12.4s\n"
-        "fmla vV23.4s, vU23.4s, vW11.4s\n"
-        "ldr qU42, [uptr3, %x[uvw_col_stride1]]\n"
-        "fmla vV11.4s, vU33.4s, vW33.4s\n"
-        "fmla vV12.4s, vU33.4s, vW32.4s\n"
-        "fmla vV13.4s, vU33.4s, vW31.4s\n"
-        "str qV13, [%x[vptr0], %x[uvw_col_stride2]]\n"
-        "fmla vV21.4s, vU33.4s, vW23.4s\n"
-        "fmla vV22.4s, vU33.4s, vW22.4s\n"
-        "fmla vV23.4s, vU33.4s, vW21.4s\n"
-        "fmul vV31.4s, vU33.4s, vW13.4s\n"
-        "fmla vV32.4s, vU33.4s, vW12.4s\n"
-        "fmla vV33.4s, vU33.4s, vW11.4s\n"
-        "ldr qU52, [uptr4, %x[uvw_col_stride1]]\n"
-        "fmla vV21.4s, vU43.4s, vW33.4s\n"
-        "fmla vV22.4s, vU43.4s, vW32.4s\n"
-        "fmla vV23.4s, vU43.4s, vW31.4s\n"
-        "str qV23, [vptr1, %x[uvw_col_stride2]]\n"
-        "fmla vV31.4s, vU43.4s, vW23.4s\n"
-        "fmla vV32.4s, vU43.4s, vW22.4s\n"
-        "fmla vV33.4s, vU43.4s, vW21.4s\n"
-        "fmul vV41.4s, vU43.4s, vW13.4s\n"
-        "fmla vV42.4s, vU43.4s, vW12.4s\n"
-        "fmla vV43.4s, vU43.4s, vW11.4s\n"
-        "ldr qU62, [uptr5, %x[uvw_col_stride1]]\n"
-        "fmla vV31.4s, vU53.4s, vW33.4s\n"
-        "fmla vV32.4s, vU53.4s, vW32.4s\n"
-        "fmla vV33.4s, vU53.4s, vW31.4s\n"
-        "str qV33, [vptr2, %x[uvw_col_stride2]]\n"
-        "fmla vV41.4s, vU53.4s, vW23.4s\n"
-        "fmla vV42.4s, vU53.4s, vW22.4s\n"
-        "fmla vV43.4s, vU53.4s, vW21.4s\n"
-        "ldr qU11, [%x[uptr0]], #0x10\n"
-        "fmla vV41.4s, vU63.4s, vW33.4s\n"
-        "fmla vV42.4s, vU63.4s, vW32.4s\n"
-        "fmla vV43.4s, vU63.4s, vW31.4s\n"
-        "str qV43, [vptr3, %x[uvw_col_stride2]]\n"
-        "fmla vV11.4s, vU12.4s, vW12.4s\n"
-        "ldr qU21, [uptr1], #0x10\n"
-        "fmla vV12.4s, vU12.4s, vW11.4s\n"
-        "ldr qU31, [uptr2], #0x10\n"
-        "fmla vV11.4s, vU22.4s, vW22.4s\n"
-        "fmla vV12.4s, vU22.4s, vW21.4s\n"
-        "fmla vV21.4s, vU22.4s, vW12.4s\n"
-        "fmla vV22.4s, vU22.4s, vW11.4s\n"
-        "ldr qU41, [uptr3], #0x10\n"
-        "fmla vV11.4s, vU32.4s, vW32.4s\n"
-        "fmla vV12.4s, vU32.4s, vW31.4s\n"
-        "str qV12, [%x[vptr0], %x[uvw_col_stride1]]\n"
-        "fmla vV21.4s, vU32.4s, vW22.4s\n"
-        "fmla vV22.4s, vU32.4s, vW21.4s\n"
-        "fmla vV31.4s, vU32.4s, vW12.4s\n"
-        "fmla vV32.4s, vU32.4s, vW11.4s\n"
-        "ldr qU51, [uptr4], #0x10\n"
-        "fmla vV21.4s, vU42.4s, vW32.4s\n"
-        "fmla vV22.4s, vU42.4s, vW31.4s\n"
-        "str qV22, [vptr1, %x[uvw_col_stride1]]\n"
-        "fmla vV31.4s, vU42.4s, vW22.4s\n"
-        "fmla vV32.4s, vU42.4s, vW21.4s\n"
-        "subs %x[c4_rem], %x[c4_rem], #1\n"
-        "fmla vV41.4s, vU42.4s, vW12.4s\n"
-        "fmla vV42.4s, vU42.4s, vW11.4s\n"
-        "ldr qU61, [uptr5], #0x10\n"
-        "fmla vV31.4s, vU52.4s, vW32.4s\n"
-        "fmla vV32.4s, vU52.4s, vW31.4s\n"
-        "str qV32, [vptr2, %x[uvw_col_stride1]]\n"
-        "fmla vV41.4s, vU52.4s, vW22.4s\n"
-        "fmla vV42.4s, vU52.4s, vW21.4s\n"
-        "fmla vV41.4s, vU62.4s, vW32.4s\n"
-        "fmla vV42.4s, vU62.4s, vW31.4s\n"
-        "str qV42, [vptr3, %x[uvw_col_stride1]]\n"
-        "fmla vV11.4s, vU11.4s, vW11.4s\n"
-        "fmla vV11.4s, vU21.4s, vW21.4s\n"
-        "fmla vV21.4s, vU21.4s, vW11.4s\n"
-        "fmla vV11.4s, vU31.4s, vW31.4s\n"
-        "str qV11, [%x[vptr0]], #0x10\n"
-        "fmla vV21.4s, vU31.4s, vW21.4s\n"
-        "fmla vV31.4s, vU31.4s, vW11.4s\n"
-        "fmla vV21.4s, vU41.4s, vW31.4s\n"
-        "str qV21, [vptr1], #0x10\n"
-        "fmla vV31.4s, vU41.4s, vW21.4s\n"
-        "fmla vV41.4s, vU41.4s, vW11.4s\n"
-        "fmla vV31.4s, vU51.4s, vW31.4s\n"
-        "str qV31, [vptr2], #0x10\n"
-        "fmla vV41.4s, vU51.4s, vW21.4s\n"
-        "fmla vV41.4s, vU61.4s, vW31.4s\n"
-        "str qV41, [vptr3], #0x10\n"
-
-      ".unreq qW22\n" ".unreq qU64\n" ".unreq qU35\n" ".unreq qV41\n"
-      ".unreq qU34\n" ".unreq qU21\n" ".unreq qV43\n" ".unreq qW21\n"
-      ".unreq qU24\n" ".unreq qU54\n" ".unreq qV31\n" ".unreq qV12\n"
-      ".unreq qU61\n" ".unreq qU26\n" ".unreq qV32\n"
-      ".unreq qU36\n" ".unreq qU51\n" ".unreq qU66\n" ".unreq qU12\n"
-      ".unreq qV14\n" ".unreq qV11\n" ".unreq qU65\n"
-      ".unreq qU15\n" ".unreq qU22\n" ".unreq qU45\n"
-      ".unreq qV22\n" ".unreq qU14\n"
-      ".unreq qU44\n" ".unreq qU43\n" ".unreq qU11\n"
-      ".unreq qV24\n" ".unreq qV42\n" ".unreq qW31\n" ".unreq qW13\n"
-      ".unreq qU33\n" ".unreq qU62\n" ".unreq qU25\n" ".unreq qU56\n"
-      ".unreq qW33\n"
-      ".unreq qU42\n" ".unreq qU16\n" ".unreq qV44\n"
-      ".unreq qU63\n" ".unreq qU31\n" ".unreq qV34\n"
-      ".unreq qW11\n" ".unreq qU41\n" ".unreq qV13\n" ".unreq qV33\n"
-      ".unreq qU46\n" ".unreq qU32\n" ".unreq qU13\n"
-      ".unreq qW23\n" ".unreq qV23\n" ".unreq qV21\n" ".unreq qU55\n"
-      ".unreq qW12\n" ".unreq qW32\n" ".unreq qU23\n" ".unreq qU52\n"
-      ".unreq qU53\n" ".unreq vW22\n"
-      ".unreq vU64\n" ".unreq vU35\n" ".unreq vV41\n"
-      ".unreq vU34\n" ".unreq vU21\n" ".unreq vV43\n" ".unreq vW21\n"
-      ".unreq vU24\n" ".unreq vU54\n" ".unreq vV31\n"
-      ".unreq vV12\n" ".unreq vU61\n"
-      ".unreq vU26\n" ".unreq vV32\n"
-      ".unreq vU36\n" ".unreq vU51\n" ".unreq vU66\n" ".unreq vU12\n"
-      ".unreq vV14\n" ".unreq vV11\n" ".unreq vU65\n"
-      ".unreq vU15\n" ".unreq vU22\n" ".unreq vU45\n"
-      ".unreq vV22\n" ".unreq vU14\n"
-      ".unreq vU44\n" ".unreq vU43\n" ".unreq vU11\n"
-      ".unreq vV24\n" ".unreq vV42\n" ".unreq vW31\n" ".unreq vW13\n"
-      ".unreq vU33\n" ".unreq vU62\n" ".unreq vU25\n" ".unreq vU56\n"
-      ".unreq vW33\n" ".unreq vU42\n" ".unreq vU16\n" ".unreq vV44\n"
-      ".unreq vU63\n" ".unreq vU31\n" ".unreq vV34\n" ".unreq vW11\n"
-      ".unreq vU41\n" ".unreq vV13\n" ".unreq vV33\n"
-      ".unreq vU46\n" ".unreq vU32\n" ".unreq vU13\n" ".unreq vW23\n"
-      ".unreq vV23\n" ".unreq vV21\n" ".unreq vU55\n" ".unreq vW12\n"
-      ".unreq vW32\n" ".unreq vU23\n" ".unreq vU52\n" ".unreq vU53\n"
-      : [uptr0] "+r" (uptr0), [vptr0] "+r" (vptr0), [wptr0] "+r" (wptr0),
-        [c4_rem] "+r" (c4_rem)
-      : [u_row_stride] "r" (in_row_stride * sizeof(float)),
-        [v_row_stride] "r" (out_row_stride * sizeof(float)),
-        [w_row_stride] "r" (weight_row_stride * sizeof(float)),
-        [uvw_col_stride1] "r" (1 * in_col_stride * sizeof(float)),
-        [uvw_col_stride2] "r" (2 * in_col_stride * sizeof(float)),
-        [uvw_col_stride3] "r" (3 * in_col_stride * sizeof(float)),
-        [uvw_col_stride4] "r" (4 * in_col_stride * sizeof(float)),
-        [uvw_col_stride5] "r" (5 * in_col_stride * sizeof(float)),
-        [prftch] "i" (prefetch_depth * sizeof(float)),
-        [prftch_uvw_col_stride1] "r" ((prefetch_depth + 1 * in_col_stride) * sizeof(float)),
-        [prftch_uvw_col_stride2] "r" ((prefetch_depth + 2 * in_col_stride) * sizeof(float)),
-        [prftch_uvw_col_stride3] "r" ((prefetch_depth + 3 * in_col_stride) * sizeof(float)),
-        [prftch_uvw_col_stride4] "r" ((prefetch_depth + 4 * in_col_stride) * sizeof(float)),
-        [prftch_uvw_col_stride5] "r" ((prefetch_depth + 5 * in_col_stride) * sizeof(float))
-      : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
-        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
-        "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x0",
-        "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
-    );
-  }
-  else if (channels_remaining >= 4)
-  {
-    int c4_rem = channels_remaining / 4;
-    channels_remaining %= 4;
-
-    asm volatile (
-      "qW22 .req q0\n" "vW22 .req v0\n"
-      "qU64 .req q1\n" "qU35 .req q1\n" "qV41 .req q1\n"
-      "vU64 .req v1\n" "vU35 .req v1\n" "vV41 .req v1\n"
-      "qU34 .req q2\n" "qU21 .req q2\n" "qV43 .req q2\n"
-      "vU34 .req v2\n" "vU21 .req v2\n" "vV43 .req v2\n"
-      "qW21 .req q3\n" "vW21 .req v3\n"
-      "qU24 .req q4\n" "qU54 .req q4\n" "qV31 .req q4\n"
-      "vU24 .req v4\n" "vU54 .req v4\n" "vV31 .req v4\n"
-      "qV12 .req q5\n" "qU61 .req q5\n" "vV12 .req v5\n" "vU61 .req v5\n"
-      "qU26 .req q6\n" "qV32 .req q6\n" "vU26 .req v6\n" "vV32 .req v6\n"
-      "qU36 .req q7\n" "qU51 .req q7\n" "qU66 .req q7\n" "qU12 .req q7\n"
-      "vU36 .req v7\n" "vU51 .req v7\n" "vU66 .req v7\n" "vU12 .req v7\n"
-      "qV14 .req q8\n" "qV11 .req q8\n" "qU65 .req q8\n"
-      "vV14 .req v8\n" "vV11 .req v8\n" "vU65 .req v8\n"
-      "qU15 .req q9\n" "qU22 .req q9\n" "qU45 .req q9\n"
-      "vU15 .req v9\n" "vU22 .req v9\n" "vU45 .req v9\n"
-      "qV22 .req q10\n" "qU14 .req q10\n" "vV22 .req v10\n" "vU14 .req v10\n"
-      "qU44 .req q11\n" "qU43 .req q11\n" "qU11 .req q11\n"
-      "vU44 .req v11\n" "vU43 .req v11\n" "vU11 .req v11\n"
-      "qV24 .req q12\n" "qV42 .req q12\n" "vV24 .req v12\n" "vV42 .req v12\n"
-      "qW31 .req q13\n" "vW31 .req v13\n" "qW13 .req q14\n" "vW13 .req v14\n"
-      "qU33 .req q15\n" "qU62 .req q15\n" "qU25 .req q15\n" "qU56 .req q15\n"
-      "vU33 .req v15\n" "vU62 .req v15\n" "vU25 .req v15\n" "vU56 .req v15\n"
-      "qW33 .req q16\n" "vW33 .req v16\n"
-      "qU42 .req q17\n" "qU16 .req q17\n" "qV44 .req q17\n"
-      "vU42 .req v17\n" "vU16 .req v17\n" "vV44 .req v17\n"
-      "qU63 .req q18\n" "qU31 .req q18\n" "qV34 .req q18\n"
-      "vU63 .req v18\n" "vU31 .req v18\n" "vV34 .req v18\n"
-      "qW11 .req q19\n" "vW11 .req v19\n" "qU41 .req q20\n" "qV13 .req q20\n"
-      "vU41 .req v20\n" "vV13 .req v20\n" "qV33 .req q21\n" "vV33 .req v21\n"
-      "qU46 .req q22\n" "qU32 .req q22\n" "qU13 .req q22\n"
-      "vU46 .req v22\n" "vU32 .req v22\n" "vU13 .req v22\n" "qW23 .req q23\n"
-      "vW23 .req v23\n" "qV23 .req q24\n" "vV23 .req v24\n"
-      "qV21 .req q25\n" "qU55 .req q25\n" "vV21 .req v25\n" "vU55 .req v25\n"
-      "qW12 .req q26\n" "vW12 .req v26\n" "qW32 .req q27\n" "vW32 .req v27\n"
-      "qU23 .req q28\n" "qU52 .req q28\n"
-      "vU23 .req v28\n" "vU52 .req v28\n" "qU53 .req q29\n" "vU53 .req v29\n"
-
-      "uptr1 .req x0\n"
-      "uptr2 .req x1\n"
-      "uptr3 .req x2\n"
-      "uptr4 .req x3\n"
-      "uptr5 .req x4\n"
-
-      "vptr1 .req x5\n"
-      "vptr2 .req x6\n"
-      "vptr3 .req x7\n"
-
-      "wptr1 .req x8\n"
-      "wptr2 .req x9\n"
-
-      "u_col_stride2 .req x10\n"
-      "u_col_stride3 .req x11\n"
-      "u_col_stride4 .req x12\n"
-      "u_col_stride5 .req x13\n"
-
-      "v_col_stride2 .req x14\n"
-      "v_col_stride3 .req x15\n"
-
-      "w_col_stride2 .req x16\n"
-
-      // Prepare pointers and strides
-      "add uptr1, %x[uptr0], %x[u_row_stride]\n"
-      "add uptr2,    uptr1 , %x[u_row_stride]\n"
-      "add uptr3,    uptr2 , %x[u_row_stride]\n"
-      "add uptr4,    uptr3 , %x[u_row_stride]\n"
-      "add uptr5,    uptr4 , %x[u_row_stride]\n"
-
-      "add vptr1, %x[vptr0], %x[v_row_stride]\n"
-      "add vptr2,    vptr1 , %x[v_row_stride]\n"
-      "add vptr3,    vptr2 , %x[v_row_stride]\n"
-
-      "add wptr1, %x[wptr0], %x[w_row_stride]\n"
-      "add wptr2,    wptr1 , %x[w_row_stride]\n"
-
-      "add u_col_stride2, %x[u_col_stride1], %x[u_col_stride1]\n"
-      "add u_col_stride3,    u_col_stride2 , %x[u_col_stride1]\n"
-      "add u_col_stride4,    u_col_stride3 , %x[u_col_stride1]\n"
-      "add u_col_stride5,    u_col_stride4 , %x[u_col_stride1]\n"
-
-      "add v_col_stride2, %x[v_col_stride1], %x[v_col_stride1]\n"
-      "add v_col_stride3,    v_col_stride2 , %x[v_col_stride1]\n"
-
-      "add w_col_stride2, %x[w_col_stride1], %x[w_col_stride1]\n"
-
-      // Load initial operands
-      "ldr qU16, [%x[uptr0], u_col_stride5]\n"
-      "ldr qW13, [%x[wptr0], w_col_stride2]\n"
-      "subs %x[c4_rem], %x[c4_rem], #1\n"
-      "ldr qU15, [%x[uptr0], u_col_stride4]\n"
-      "ldr qW23, [wptr1, w_col_stride2]\n"
-      "ldr qU14, [%x[uptr0], u_col_stride3]\n"
-      "ldr qW33, [wptr2, w_col_stride2]\n"
-      "ldr qU26, [uptr1, u_col_stride5]\n"
-      "ldr qW12, [%x[wptr0], %x[w_col_stride1]]\n"
-      "ldr qU25, [uptr1, u_col_stride4]\n"
-      "ldr qW22, [wptr1, %x[w_col_stride1]]\n"
-      "ldr qU36, [uptr2, u_col_stride5]\n"
-      "ldr qW32, [wptr2, %x[w_col_stride1]]\n"
-      "ldr qW11, [%x[wptr0]], #0x10\n"
-      "fmul vV14.4s, vU16.4s, vW13.4s\n"
-      "ldr qU24, [uptr1, u_col_stride3]\n"
-      "fmul vV13.4s, vU15.4s, vW13.4s\n"
-      "ldr qW31, [wptr2], #0x10\n"
-      "fmla vV14.4s, vU15.4s, vW12.4s\n"
-      "ldr qW21, [wptr1], #0x10\n"
-      "fmul vV12.4s, vU14.4s, vW13.4s\n"
-      "ldr qU34, [uptr2, u_col_stride3]\n"
-      "fmla vV13.4s, vU14.4s, vW12.4s\n"
-      "ldr qU46, [uptr3, u_col_stride5]\n"
-      "fmla vV14.4s, vU14.4s, vW11.4s\n"
-      "ldr qU45, [uptr3, u_col_stride4]\n"
-      "fmla vV14.4s, vU26.4s, vW23.4s\n"
-      "ldr qU35, [uptr2, u_col_stride4]\n"
-      "fmul vV24.4s, vU26.4s, vW13.4s\n"
-      "ldr qU44, [uptr3, u_col_stride3]\n"
-      "fmla vV13.4s, vU25.4s, vW23.4s\n"
-      "beq 2f\n"  // Single iteration only
-
-      "1:"  // Loop body
-        "fmla vV14.4s, vU25.4s, vW22.4s\n"
-        "prfm pldl1keep, [%x[wptr0]]\n"
-        "fmul vV23.4s, vU25.4s, vW13.4s\n"
-        "prfm pldl1keep, [%x[wptr0], %x[w_col_stride1]]\n"
-        "fmla vV24.4s, vU25.4s, vW12.4s\n"
-        "ldr qU56, [uptr4, u_col_stride5]\n"
-        "fmla vV12.4s, vU24.4s, vW23.4s\n"
-        "prfm pldl1keep, [%x[wptr0],    w_col_stride2 ]\n"
-        "fmla vV13.4s, vU24.4s, vW22.4s\n"
-        "prfm pldl1keep, [   wptr1 ]\n"
-        "fmla vV14.4s, vU24.4s, vW21.4s\n"
-        "prfm pldl1keep, [   wptr1 , %x[w_col_stride1]]\n"
-        "fmul vV22.4s, vU24.4s, vW13.4s\n"
-        "prfm pldl1keep, [   wptr1 ,    w_col_stride2 ]\n"
-        "fmla vV23.4s, vU24.4s, vW12.4s\n"
-        "prfm pldl1keep, [   wptr2 ]\n"
-        "fmla vV24.4s, vU24.4s, vW11.4s\n"
-        "ldr qU55, [uptr4, u_col_stride4]\n"
-        "fmla vV14.4s, vU36.4s, vW33.4s\n"
-        "prfm pldl1keep, [   wptr2 , %x[w_col_stride1]]\n"
-        "fmla vV24.4s, vU36.4s, vW23.4s\n"
-        "prfm pldl1keep, [   wptr2 ,    w_col_stride2 ]\n"
-        "fmul vV34.4s, vU36.4s, vW13.4s\n"
-        "ldr qU54, [uptr4, u_col_stride3]\n"
-        "fmla vV13.4s, vU35.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr2 , %x[u_col_stride1]]\n"
-        "fmla vV14.4s, vU35.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr2 ,    u_col_stride2 ]\n"
-        "fmla vV23.4s, vU35.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr2 ,    u_col_stride3 ]\n"
-        "fmla vV24.4s, vU35.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr2 ,    u_col_stride4 ]\n"
-        "fmul vV33.4s, vU35.4s, vW13.4s\n"
-        "prfm pldl1keep, [   uptr2 ,    u_col_stride5 ]\n"
-        "fmla vV34.4s, vU35.4s, vW12.4s\n"
-        "ldr qU66, [uptr5, u_col_stride5]\n"
-        "fmla vV12.4s, vU34.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr3 ]\n"
-        "fmla vV13.4s, vU34.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr3 , %x[u_col_stride1]]\n"
-        "fmla vV14.4s, vU34.4s, vW31.4s\n"
-        "str qV14, [%x[vptr0], v_col_stride3]\n"
-        "fmla vV22.4s, vU34.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr3 ,    u_col_stride2 ]\n"
-        "fmla vV23.4s, vU34.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr3 ,    u_col_stride3 ]\n"
-        "fmla vV24.4s, vU34.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr3 ,    u_col_stride4 ]\n"
-        "fmul vV32.4s, vU34.4s, vW13.4s\n"
-        "prfm pldl1keep, [   uptr3 ,    u_col_stride5 ]\n"
-        "fmla vV33.4s, vU34.4s, vW12.4s\n"
-        "prfm pldl1keep, [   uptr4 ]\n"
-        "fmla vV34.4s, vU34.4s, vW11.4s\n"
-        "ldr qU65, [uptr5, u_col_stride4]\n"
-        "fmla vV24.4s, vU46.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr4 , %x[u_col_stride1]]\n"
-        "fmla vV34.4s, vU46.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr4 ,    u_col_stride2 ]\n"
-        "fmul vV44.4s, vU46.4s, vW13.4s\n"
-        "ldr qU64, [uptr5, u_col_stride3]\n"
-        "fmla vV23.4s, vU45.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr4 ,    u_col_stride3 ]\n"
-        "fmla vV24.4s, vU45.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr4 ,    u_col_stride4 ]\n"
-        "fmla vV33.4s, vU45.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr4 ,    u_col_stride5 ]\n"
-        "fmla vV34.4s, vU45.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr5 ]\n"
-        "fmul vV43.4s, vU45.4s, vW13.4s\n"
-        "prfm pldl1keep, [   uptr5 , %x[u_col_stride1]]\n"
-        "fmla vV44.4s, vU45.4s, vW12.4s\n"
-        "ldr qU13, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV22.4s, vU44.4s, vW33.4s\n"
-        "prfm pldl1keep, [   uptr5 ,    u_col_stride2 ]\n"
-        "fmla vV23.4s, vU44.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr5 ,    u_col_stride3 ]\n"
-        "fmla vV24.4s, vU44.4s, vW31.4s\n"
-        "str qV24, [vptr1, v_col_stride3]\n"
-        "fmla vV32.4s, vU44.4s, vW23.4s\n"
-        "prfm pldl1keep, [   uptr5 ,    u_col_stride4 ]\n"
-        "fmla vV33.4s, vU44.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr5 ,    u_col_stride5 ]\n"
-        "fmla vV34.4s, vU44.4s, vW21.4s\n"
-        "prfm pstl1keep, [%x[vptr0]]\n"
-        "fmul vV42.4s, vU44.4s, vW13.4s\n"
-        "prfm pstl1keep, [%x[vptr0], %x[v_col_stride1]]\n"
-        "fmla vV43.4s, vU44.4s, vW12.4s\n"
-        "prfm pstl1keep, [%x[vptr0],    v_col_stride2 ]\n"
-        "fmla vV44.4s, vU44.4s, vW11.4s\n"
-        "ldr qU23, [uptr1, u_col_stride2]\n"
-        "fmla vV34.4s, vU56.4s, vW33.4s\n"
-        "prfm pstl1keep, [%x[vptr0],    v_col_stride3 ]\n"
-        "fmla vV44.4s, vU56.4s, vW23.4s\n"
-        "ldr qU33, [uptr2, u_col_stride2]\n"
-        "fmla vV33.4s, vU55.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr1 ]\n"
-        "fmla vV34.4s, vU55.4s, vW32.4s\n"
-        "prfm pstl1keep, [   vptr1 , %x[v_col_stride1]]\n"
-        "fmla vV43.4s, vU55.4s, vW23.4s\n"
-        "prfm pstl1keep, [   vptr1 ,    v_col_stride2 ]\n"
-        "fmla vV44.4s, vU55.4s, vW22.4s\n"
-        "ldr qU43, [uptr3, u_col_stride2]\n"
-        "fmla vV32.4s, vU54.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr1 ,    v_col_stride3 ]\n"
-        "fmla vV33.4s, vU54.4s, vW32.4s\n"
-        "prfm pstl1keep, [   vptr2 ]\n"
-        "fmla vV34.4s, vU54.4s, vW31.4s\n"
-        "str qV34, [vptr2, v_col_stride3]\n"
-        "fmla vV42.4s, vU54.4s, vW23.4s\n"
-        "prfm pstl1keep, [   vptr2 , %x[v_col_stride1]]\n"
-        "fmla vV43.4s, vU54.4s, vW22.4s\n"
-        "prfm pstl1keep, [   vptr2 ,    v_col_stride2 ]\n"
-        "fmla vV44.4s, vU54.4s, vW21.4s\n"
-        "ldr qU53, [uptr4, u_col_stride2]\n"
-        "fmla vV44.4s, vU66.4s, vW33.4s\n"
-        "ldr qU63, [uptr5, u_col_stride2]\n"
-        "fmla vV43.4s, vU65.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr2 ,    v_col_stride3 ]\n"
-        "fmla vV44.4s, vU65.4s, vW32.4s\n"
-        "ldr qU12, [%x[uptr0], %x[u_col_stride1]]\n"
-        "fmla vV42.4s, vU64.4s, vW33.4s\n"
-        "prfm pstl1keep, [   vptr3 ]\n"
-        "fmla vV43.4s, vU64.4s, vW32.4s\n"
-        "prfm pstl1keep, [   vptr3 , %x[v_col_stride1]]\n"
-        "fmla vV44.4s, vU64.4s, vW31.4s\n"
-        "str qV44, [vptr3, v_col_stride3]\n"
-        "fmul vV11.4s, vU13.4s, vW13.4s\n"
-        "ldr qU22, [uptr1, %x[u_col_stride1]]\n"
-        "fmla vV12.4s, vU13.4s, vW12.4s\n"
-        "prfm pstl1keep, [   vptr3 ,    v_col_stride2 ]\n"
-        "fmla vV13.4s, vU13.4s, vW11.4s\n"
-        "ldr qU32, [uptr2, %x[u_col_stride1]]\n"
-        "fmla vV11.4s, vU23.4s, vW23.4s\n"
-        "prfm pstl1keep, [   vptr3 ,    v_col_stride3 ]\n"
-        "fmla vV12.4s, vU23.4s, vW22.4s\n"
-        "fmla vV13.4s, vU23.4s, vW21.4s\n"
-        "fmul vV21.4s, vU23.4s, vW13.4s\n"
-        "fmla vV22.4s, vU23.4s, vW12.4s\n"
-        "fmla vV23.4s, vU23.4s, vW11.4s\n"
-        "ldr qU42, [uptr3, %x[u_col_stride1]]\n"
-        "fmla vV11.4s, vU33.4s, vW33.4s\n"
-        "fmla vV12.4s, vU33.4s, vW32.4s\n"
-        "fmla vV13.4s, vU33.4s, vW31.4s\n"
-        "str qV13, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21.4s, vU33.4s, vW23.4s\n"
-        "fmla vV22.4s, vU33.4s, vW22.4s\n"
-        "fmla vV23.4s, vU33.4s, vW21.4s\n"
-        "fmul vV31.4s, vU33.4s, vW13.4s\n"
-        "fmla vV32.4s, vU33.4s, vW12.4s\n"
-        "fmla vV33.4s, vU33.4s, vW11.4s\n"
-        "ldr qU52, [uptr4, %x[u_col_stride1]]\n"
-        "fmla vV21.4s, vU43.4s, vW33.4s\n"
-        "fmla vV22.4s, vU43.4s, vW32.4s\n"
-        "fmla vV23.4s, vU43.4s, vW31.4s\n"
-        "str qV23, [vptr1, v_col_stride2]\n"
-        "fmla vV31.4s, vU43.4s, vW23.4s\n"
-        "fmla vV32.4s, vU43.4s, vW22.4s\n"
-        "fmla vV33.4s, vU43.4s, vW21.4s\n"
-        "fmul vV41.4s, vU43.4s, vW13.4s\n"
-        "ldr qW13, [%x[wptr0], w_col_stride2]\n"
-        "fmla vV42.4s, vU43.4s, vW12.4s\n"
-        "fmla vV43.4s, vU43.4s, vW11.4s\n"
-        "ldr qU62, [uptr5, %x[u_col_stride1]]\n"
-        "fmla vV31.4s, vU53.4s, vW33.4s\n"
-        "fmla vV32.4s, vU53.4s, vW32.4s\n"
-        "fmla vV33.4s, vU53.4s, vW31.4s\n"
-        "str qV33, [vptr2, v_col_stride2]\n"
-        "fmla vV41.4s, vU53.4s, vW23.4s\n"
-        "ldr qW23, [wptr1, w_col_stride2]\n"
-        "fmla vV42.4s, vU53.4s, vW22.4s\n"
-        "fmla vV43.4s, vU53.4s, vW21.4s\n"
-        "ldr qU11, [%x[uptr0]], #0x10\n"
-        "fmla vV41.4s, vU63.4s, vW33.4s\n"
-        "ldr qW33, [wptr2, w_col_stride2]\n"
-        "fmla vV42.4s, vU63.4s, vW32.4s\n"
-        "prfm pldl1keep, [%x[uptr0]]\n"
-        "fmla vV43.4s, vU63.4s, vW31.4s\n"
-        "str qV43, [vptr3, v_col_stride2]\n"
-        "fmla vV11.4s, vU12.4s, vW12.4s\n"
-        "ldr qU21, [uptr1], #0x10\n"
-        "fmla vV12.4s, vU12.4s, vW11.4s\n"
-        "ldr qU31, [uptr2], #0x10\n"
-        "fmla vV11.4s, vU22.4s, vW22.4s\n"
-        "prfm pldl1keep, [%x[uptr0], %x[u_col_stride1]]\n"
-        "fmla vV12.4s, vU22.4s, vW21.4s\n"
-        "prfm pldl1keep, [%x[uptr0],    u_col_stride2 ]\n"
-        "fmla vV21.4s, vU22.4s, vW12.4s\n"
-        "prfm pldl1keep, [%x[uptr0],    u_col_stride3 ]\n"
-        "fmla vV22.4s, vU22.4s, vW11.4s\n"
-        "ldr qU41, [uptr3], #0x10\n"
-        "fmla vV11.4s, vU32.4s, vW32.4s\n"
-        "prfm pldl1keep, [%x[uptr0],    u_col_stride4 ]\n"
-        "fmla vV12.4s, vU32.4s, vW31.4s\n"
-        "str qV12, [%x[vptr0], %x[v_col_stride1]]\n"
-        "fmla vV21.4s, vU32.4s, vW22.4s\n"
-        "prfm pldl1keep, [%x[uptr0],    u_col_stride5 ]\n"
-        "fmla vV22.4s, vU32.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr1 ]\n"
-        "fmla vV31.4s, vU32.4s, vW12.4s\n"
-        "prfm pldl1keep, [   uptr1 , %x[u_col_stride1]]\n"
-        "fmla vV32.4s, vU32.4s, vW11.4s\n"
-        "ldr qU51, [uptr4], #0x10\n"
-        "fmla vV21.4s, vU42.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr1 ,    u_col_stride2 ]\n"
-        "fmla vV22.4s, vU42.4s, vW31.4s\n"
-        "str qV22, [vptr1, %x[v_col_stride1]]\n"
-        "fmla vV31.4s, vU42.4s, vW22.4s\n"
-        "prfm pldl1keep, [   uptr1 ,    u_col_stride3 ]\n"
-        "fmla vV32.4s, vU42.4s, vW21.4s\n"
-        "subs %x[c4_rem], %x[c4_rem], #1\n"
-        "fmla vV41.4s, vU42.4s, vW12.4s\n"
-        "ldr qW12, [%x[wptr0], %x[w_col_stride1]]\n"
-        "fmla vV42.4s, vU42.4s, vW11.4s\n"
-        "ldr qU61, [uptr5], #0x10\n"
-        "fmla vV31.4s, vU52.4s, vW32.4s\n"
-        "prfm pldl1keep, [   uptr1 ,    u_col_stride4 ]\n"
-        "fmla vV32.4s, vU52.4s, vW31.4s\n"
-        "str qV32, [vptr2, %x[v_col_stride1]]\n"
-        "fmla vV41.4s, vU52.4s, vW22.4s\n"
-        "ldr qW22, [wptr1, %x[w_col_stride1]]\n"
-        "fmla vV42.4s, vU52.4s, vW21.4s\n"
-        "ldr qU16, [%x[uptr0], u_col_stride5]\n"
-        "fmla vV41.4s, vU62.4s, vW32.4s\n"
-        "ldr qW32, [wptr2, %x[w_col_stride1]]\n"
-        "fmla vV42.4s, vU62.4s, vW31.4s\n"
-        "str qV42, [vptr3, %x[v_col_stride1]]\n"
-        "fmla vV11.4s, vU11.4s, vW11.4s\n"
-        "ldr qU15, [%x[uptr0], u_col_stride4]\n"
-        "fmla vV11.4s, vU21.4s, vW21.4s\n"
-        "ldr qU14, [%x[uptr0], u_col_stride3]\n"
-        "fmla vV21.4s, vU21.4s, vW11.4s\n"
-        "ldr qU26, [uptr1, u_col_stride5]\n"
-        "fmla vV11.4s, vU31.4s, vW31.4s\n"
-        "str qV11, [%x[vptr0]], #0x10\n"
-        "fmla vV21.4s, vU31.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr1 ,    u_col_stride5 ]\n"
-        "fmla vV31.4s, vU31.4s, vW11.4s\n"
-        "ldr qU25, [uptr1, u_col_stride4]\n"
-        "fmla vV21.4s, vU41.4s, vW31.4s\n"
-        "str qV21, [vptr1], #0x10\n"
-        "fmla vV31.4s, vU41.4s, vW21.4s\n"
-        "prfm pldl1keep, [   uptr2 ]\n"
-        "fmla vV41.4s, vU41.4s, vW11.4s\n"
-        "ldr qW11, [%x[wptr0]], #0x10\n"
-        "fmla vV31.4s, vU51.4s, vW31.4s\n"
-        "str qV31, [vptr2], #0x10\n"
-        "fmla vV41.4s, vU51.4s, vW21.4s\n"
-        "ldr qU36, [uptr2, u_col_stride5]\n"
-        "fmla vV41.4s, vU61.4s, vW31.4s\n"
-        "str qV41, [vptr3], #0x10\n"
-        "fmul vV14.4s, vU16.4s, vW13.4s\n"
-        "ldr qU24, [uptr1, u_col_stride3]\n"
-        "fmul vV13.4s, vU15.4s, vW13.4s\n"
-        "ldr qW31, [wptr2], #0x10\n"
-        "fmla vV14.4s, vU15.4s, vW12.4s\n"
-        "ldr qW21, [wptr1], #0x10\n"
-        "fmul vV12.4s, vU14.4s, vW13.4s\n"
-        "ldr qU34, [uptr2, u_col_stride3]\n"
-        "fmla vV13.4s, vU14.4s, vW12.4s\n"
-        "ldr qU46, [uptr3, u_col_stride5]\n"
-        "fmla vV14.4s, vU14.4s, vW11.4s\n"
-        "ldr qU45, [uptr3, u_col_stride4]\n"
-        "fmla vV14.4s, vU26.4s, vW23.4s\n"
-        "ldr qU35, [uptr2, u_col_stride4]\n"
-        "fmul vV24.4s, vU26.4s, vW13.4s\n"
-        "ldr qU44, [uptr3, u_col_stride3]\n"
-        "fmla vV13.4s, vU25.4s, vW23.4s\n"
-        "bne 1b\n"
-
-      "2:"  // Final iteration
-        "fmla vV14.4s, vU25.4s, vW22.4s\n"
-        "fmul vV23.4s, vU25.4s, vW13.4s\n"
-        "fmla vV24.4s, vU25.4s, vW12.4s\n"
-        "ldr qU56, [uptr4, u_col_stride5]\n"
-        "fmla vV12.4s, vU24.4s, vW23.4s\n"
-        "fmla vV13.4s, vU24.4s, vW22.4s\n"
-        "fmla vV14.4s, vU24.4s, vW21.4s\n"
-        "fmul vV22.4s, vU24.4s, vW13.4s\n"
-        "fmla vV23.4s, vU24.4s, vW12.4s\n"
-        "fmla vV24.4s, vU24.4s, vW11.4s\n"
-        "ldr qU55, [uptr4, u_col_stride4]\n"
-        "fmla vV14.4s, vU36.4s, vW33.4s\n"
-        "fmla vV24.4s, vU36.4s, vW23.4s\n"
-        "fmul vV34.4s, vU36.4s, vW13.4s\n"
-        "ldr qU54, [uptr4, u_col_stride3]\n"
-        "fmla vV13.4s, vU35.4s, vW33.4s\n"
-        "fmla vV14.4s, vU35.4s, vW32.4s\n"
-        "fmla vV23.4s, vU35.4s, vW23.4s\n"
-        "fmla vV24.4s, vU35.4s, vW22.4s\n"
-        "fmul vV33.4s, vU35.4s, vW13.4s\n"
-        "fmla vV34.4s, vU35.4s, vW12.4s\n"
-        "ldr qU66, [uptr5, u_col_stride5]\n"
-        "fmla vV12.4s, vU34.4s, vW33.4s\n"
-        "fmla vV13.4s, vU34.4s, vW32.4s\n"
-        "fmla vV14.4s, vU34.4s, vW31.4s\n"
-        "str qV14, [%x[vptr0], v_col_stride3]\n"
-        "fmla vV22.4s, vU34.4s, vW23.4s\n"
-        "fmla vV23.4s, vU34.4s, vW22.4s\n"
-        "fmla vV24.4s, vU34.4s, vW21.4s\n"
-        "fmul vV32.4s, vU34.4s, vW13.4s\n"
-        "fmla vV33.4s, vU34.4s, vW12.4s\n"
-        "fmla vV34.4s, vU34.4s, vW11.4s\n"
-        "ldr qU65, [uptr5, u_col_stride4]\n"
-        "fmla vV24.4s, vU46.4s, vW33.4s\n"
-        "fmla vV34.4s, vU46.4s, vW23.4s\n"
-        "fmul vV44.4s, vU46.4s, vW13.4s\n"
-        "ldr qU64, [uptr5, u_col_stride3]\n"
-        "fmla vV23.4s, vU45.4s, vW33.4s\n"
-        "fmla vV24.4s, vU45.4s, vW32.4s\n"
-        "fmla vV33.4s, vU45.4s, vW23.4s\n"
-        "fmla vV34.4s, vU45.4s, vW22.4s\n"
-        "fmul vV43.4s, vU45.4s, vW13.4s\n"
-        "fmla vV44.4s, vU45.4s, vW12.4s\n"
-        "ldr qU13, [%x[uptr0], u_col_stride2]\n"
-        "fmla vV22.4s, vU44.4s, vW33.4s\n"
-        "fmla vV23.4s, vU44.4s, vW32.4s\n"
-        "fmla vV24.4s, vU44.4s, vW31.4s\n"
-        "str qV24, [vptr1, v_col_stride3]\n"
-        "fmla vV32.4s, vU44.4s, vW23.4s\n"
-        "fmla vV33.4s, vU44.4s, vW22.4s\n"
-        "fmla vV34.4s, vU44.4s, vW21.4s\n"
-        "fmul vV42.4s, vU44.4s, vW13.4s\n"
-        "fmla vV43.4s, vU44.4s, vW12.4s\n"
-        "fmla vV44.4s, vU44.4s, vW11.4s\n"
-        "ldr qU23, [uptr1, u_col_stride2]\n"
-        "fmla vV34.4s, vU56.4s, vW33.4s\n"
-        "fmla vV44.4s, vU56.4s, vW23.4s\n"
-        "ldr qU33, [uptr2, u_col_stride2]\n"
-        "fmla vV33.4s, vU55.4s, vW33.4s\n"
-        "fmla vV34.4s, vU55.4s, vW32.4s\n"
-        "fmla vV43.4s, vU55.4s, vW23.4s\n"
-        "fmla vV44.4s, vU55.4s, vW22.4s\n"
-        "ldr qU43, [uptr3, u_col_stride2]\n"
-        "fmla vV32.4s, vU54.4s, vW33.4s\n"
-        "fmla vV33.4s, vU54.4s, vW32.4s\n"
-        "fmla vV34.4s, vU54.4s, vW31.4s\n"
-        "str qV34, [vptr2, v_col_stride3]\n"
-        "fmla vV42.4s, vU54.4s, vW23.4s\n"
-        "fmla vV43.4s, vU54.4s, vW22.4s\n"
-        "fmla vV44.4s, vU54.4s, vW21.4s\n"
-        "ldr qU53, [uptr4, u_col_stride2]\n"
-        "fmla vV44.4s, vU66.4s, vW33.4s\n"
-        "ldr qU63, [uptr5, u_col_stride2]\n"
-        "fmla vV43.4s, vU65.4s, vW33.4s\n"
-        "fmla vV44.4s, vU65.4s, vW32.4s\n"
-        "ldr qU12, [%x[uptr0], %x[u_col_stride1]]\n"
-        "fmla vV42.4s, vU64.4s, vW33.4s\n"
-        "fmla vV43.4s, vU64.4s, vW32.4s\n"
-        "fmla vV44.4s, vU64.4s, vW31.4s\n"
-        "str qV44, [vptr3, v_col_stride3]\n"
-        "fmul vV11.4s, vU13.4s, vW13.4s\n"
-        "ldr qU22, [uptr1, %x[u_col_stride1]]\n"
-        "fmla vV12.4s, vU13.4s, vW12.4s\n"
-        "fmla vV13.4s, vU13.4s, vW11.4s\n"
-        "ldr qU32, [uptr2, %x[u_col_stride1]]\n"
-        "fmla vV11.4s, vU23.4s, vW23.4s\n"
-        "fmla vV12.4s, vU23.4s, vW22.4s\n"
-        "fmla vV13.4s, vU23.4s, vW21.4s\n"
-        "fmul vV21.4s, vU23.4s, vW13.4s\n"
-        "fmla vV22.4s, vU23.4s, vW12.4s\n"
-        "fmla vV23.4s, vU23.4s, vW11.4s\n"
-        "ldr qU42, [uptr3, %x[u_col_stride1]]\n"
-        "fmla vV11.4s, vU33.4s, vW33.4s\n"
-        "fmla vV12.4s, vU33.4s, vW32.4s\n"
-        "fmla vV13.4s, vU33.4s, vW31.4s\n"
-        "str qV13, [%x[vptr0], v_col_stride2]\n"
-        "fmla vV21.4s, vU33.4s, vW23.4s\n"
-        "fmla vV22.4s, vU33.4s, vW22.4s\n"
-        "fmla vV23.4s, vU33.4s, vW21.4s\n"
-        "fmul vV31.4s, vU33.4s, vW13.4s\n"
-        "fmla vV32.4s, vU33.4s, vW12.4s\n"
-        "fmla vV33.4s, vU33.4s, vW11.4s\n"
-        "ldr qU52, [uptr4, %x[u_col_stride1]]\n"
-        "fmla vV21.4s, vU43.4s, vW33.4s\n"
-        "fmla vV22.4s, vU43.4s, vW32.4s\n"
-        "fmla vV23.4s, vU43.4s, vW31.4s\n"
-        "str qV23, [vptr1, v_col_stride2]\n"
-        "fmla vV31.4s, vU43.4s, vW23.4s\n"
-        "fmla vV32.4s, vU43.4s, vW22.4s\n"
-        "fmla vV33.4s, vU43.4s, vW21.4s\n"
-        "fmul vV41.4s, vU43.4s, vW13.4s\n"
-        "fmla vV42.4s, vU43.4s, vW12.4s\n"
-        "fmla vV43.4s, vU43.4s, vW11.4s\n"
-        "ldr qU62, [uptr5, %x[u_col_stride1]]\n"
-        "fmla vV31.4s, vU53.4s, vW33.4s\n"
-        "fmla vV32.4s, vU53.4s, vW32.4s\n"
-        "fmla vV33.4s, vU53.4s, vW31.4s\n"
-        "str qV33, [vptr2, v_col_stride2]\n"
-        "fmla vV41.4s, vU53.4s, vW23.4s\n"
-        "fmla vV42.4s, vU53.4s, vW22.4s\n"
-        "fmla vV43.4s, vU53.4s, vW21.4s\n"
-        "ldr qU11, [%x[uptr0]], #0x10\n"
-        "fmla vV41.4s, vU63.4s, vW33.4s\n"
-        "fmla vV42.4s, vU63.4s, vW32.4s\n"
-        "fmla vV43.4s, vU63.4s, vW31.4s\n"
-        "str qV43, [vptr3, v_col_stride2]\n"
-        "fmla vV11.4s, vU12.4s, vW12.4s\n"
-        "ldr qU21, [uptr1], #0x10\n"
-        "fmla vV12.4s, vU12.4s, vW11.4s\n"
-        "ldr qU31, [uptr2], #0x10\n"
-        "fmla vV11.4s, vU22.4s, vW22.4s\n"
-        "fmla vV12.4s, vU22.4s, vW21.4s\n"
-        "fmla vV21.4s, vU22.4s, vW12.4s\n"
-        "fmla vV22.4s, vU22.4s, vW11.4s\n"
-        "ldr qU41, [uptr3], #0x10\n"
-        "fmla vV11.4s, vU32.4s, vW32.4s\n"
-        "fmla vV12.4s, vU32.4s, vW31.4s\n"
-        "str qV12, [%x[vptr0], %x[v_col_stride1]]\n"
-        "fmla vV21.4s, vU32.4s, vW22.4s\n"
-        "fmla vV22.4s, vU32.4s, vW21.4s\n"
-        "fmla vV31.4s, vU32.4s, vW12.4s\n"
-        "fmla vV32.4s, vU32.4s, vW11.4s\n"
-        "ldr qU51, [uptr4], #0x10\n"
-        "fmla vV21.4s, vU42.4s, vW32.4s\n"
-        "fmla vV22.4s, vU42.4s, vW31.4s\n"
-        "str qV22, [vptr1, %x[v_col_stride1]]\n"
-        "fmla vV31.4s, vU42.4s, vW22.4s\n"
-        "fmla vV32.4s, vU42.4s, vW21.4s\n"
-        "subs %x[c4_rem], %x[c4_rem], #1\n"
-        "fmla vV41.4s, vU42.4s, vW12.4s\n"
-        "fmla vV42.4s, vU42.4s, vW11.4s\n"
-        "ldr qU61, [uptr5], #0x10\n"
-        "fmla vV31.4s, vU52.4s, vW32.4s\n"
-        "fmla vV32.4s, vU52.4s, vW31.4s\n"
-        "str qV32, [vptr2, %x[v_col_stride1]]\n"
-        "fmla vV41.4s, vU52.4s, vW22.4s\n"
-        "fmla vV42.4s, vU52.4s, vW21.4s\n"
-        "fmla vV41.4s, vU62.4s, vW32.4s\n"
-        "fmla vV42.4s, vU62.4s, vW31.4s\n"
-        "str qV42, [vptr3, %x[v_col_stride1]]\n"
-        "fmla vV11.4s, vU11.4s, vW11.4s\n"
-        "fmla vV11.4s, vU21.4s, vW21.4s\n"
-        "fmla vV21.4s, vU21.4s, vW11.4s\n"
-        "fmla vV11.4s, vU31.4s, vW31.4s\n"
-        "str qV11, [%x[vptr0]], #0x10\n"
-        "fmla vV21.4s, vU31.4s, vW21.4s\n"
-        "fmla vV31.4s, vU31.4s, vW11.4s\n"
-        "fmla vV21.4s, vU41.4s, vW31.4s\n"
-        "str qV21, [vptr1], #0x10\n"
-        "fmla vV31.4s, vU41.4s, vW21.4s\n"
-        "fmla vV41.4s, vU41.4s, vW11.4s\n"
-        "fmla vV31.4s, vU51.4s, vW31.4s\n"
-        "str qV31, [vptr2], #0x10\n"
-        "fmla vV41.4s, vU51.4s, vW21.4s\n"
-        "fmla vV41.4s, vU61.4s, vW31.4s\n"
-        "str qV41, [vptr3], #0x10\n"
-
-      ".unreq qW22\n" ".unreq qU64\n" ".unreq qU35\n" ".unreq qV41\n"
-      ".unreq qU34\n" ".unreq qU21\n" ".unreq qV43\n" ".unreq qW21\n"
-      ".unreq qU24\n" ".unreq qU54\n" ".unreq qV31\n" ".unreq qV12\n"
-      ".unreq qU61\n" ".unreq qU26\n" ".unreq qV32\n"
-      ".unreq qU36\n" ".unreq qU51\n" ".unreq qU66\n" ".unreq qU12\n"
-      ".unreq qV14\n" ".unreq qV11\n" ".unreq qU65\n"
-      ".unreq qU15\n" ".unreq qU22\n" ".unreq qU45\n"
-      ".unreq qV22\n" ".unreq qU14\n"
-      ".unreq qU44\n" ".unreq qU43\n" ".unreq qU11\n"
-      ".unreq qV24\n" ".unreq qV42\n" ".unreq qW31\n" ".unreq qW13\n"
-      ".unreq qU33\n" ".unreq qU62\n" ".unreq qU25\n" ".unreq qU56\n"
-      ".unreq qW33\n"
-      ".unreq qU42\n" ".unreq qU16\n" ".unreq qV44\n"
-      ".unreq qU63\n" ".unreq qU31\n" ".unreq qV34\n"
-      ".unreq qW11\n" ".unreq qU41\n" ".unreq qV13\n" ".unreq qV33\n"
-      ".unreq qU46\n" ".unreq qU32\n" ".unreq qU13\n"
-      ".unreq qW23\n" ".unreq qV23\n" ".unreq qV21\n" ".unreq qU55\n"
-      ".unreq qW12\n" ".unreq qW32\n" ".unreq qU23\n" ".unreq qU52\n"
-      ".unreq qU53\n" ".unreq vW22\n"
-      ".unreq vU64\n" ".unreq vU35\n" ".unreq vV41\n"
-      ".unreq vU34\n" ".unreq vU21\n" ".unreq vV43\n" ".unreq vW21\n"
-      ".unreq vU24\n" ".unreq vU54\n" ".unreq vV31\n"
-      ".unreq vV12\n" ".unreq vU61\n"
-      ".unreq vU26\n" ".unreq vV32\n"
-      ".unreq vU36\n" ".unreq vU51\n" ".unreq vU66\n" ".unreq vU12\n"
-      ".unreq vV14\n" ".unreq vV11\n" ".unreq vU65\n"
-      ".unreq vU15\n" ".unreq vU22\n" ".unreq vU45\n"
-      ".unreq vV22\n" ".unreq vU14\n"
-      ".unreq vU44\n" ".unreq vU43\n" ".unreq vU11\n"
-      ".unreq vV24\n" ".unreq vV42\n" ".unreq vW31\n" ".unreq vW13\n"
-      ".unreq vU33\n" ".unreq vU62\n" ".unreq vU25\n" ".unreq vU56\n"
-      ".unreq vW33\n" ".unreq vU42\n" ".unreq vU16\n" ".unreq vV44\n"
-      ".unreq vU63\n" ".unreq vU31\n" ".unreq vV34\n" ".unreq vW11\n"
-      ".unreq vU41\n" ".unreq vV13\n" ".unreq vV33\n"
-      ".unreq vU46\n" ".unreq vU32\n" ".unreq vU13\n" ".unreq vW23\n"
-      ".unreq vV23\n" ".unreq vV21\n" ".unreq vU55\n" ".unreq vW12\n"
-      ".unreq vW32\n" ".unreq vU23\n" ".unreq vU52\n" ".unreq vU53\n"
-      : [uptr0] "+r" (uptr0), [vptr0] "+r" (vptr0), [wptr0] "+r" (wptr0),
-        [c4_rem] "+r" (c4_rem)
-      : [u_row_stride] "r" (in_row_stride * sizeof(float)),
-        [u_col_stride1] "r" (in_col_stride * sizeof(float)),
-        [v_row_stride] "r" (out_row_stride * sizeof(float)),
-        [v_col_stride1] "r" (out_col_stride * sizeof(float)),
-        [w_row_stride] "r" (weight_row_stride * sizeof(float)),
-        [w_col_stride1] "r" (weight_col_stride * sizeof(float))
-      : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
-        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
-        "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x0",
-        "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
-        "x12", "x13", "x14", "x15", "x16", "cc", "memory"
-    );
-  }
-  for (; channels_remaining; channels_remaining--)
-  {
-    // Load input tile
-    float u[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
-    {
-      const float* const inptr_row = uptr0 + (i - in_pad_top)*in_row_stride;
-      for (int j = 0; j < inner_tile_cols; j++)
-      {
-        if (i < in_pad_top || in_cells_i <= i ||
-            j < in_pad_left || in_cells_j <= j)
-        {
-          u[i][j] = static_cast<float>(0);
-        }
-        else
-        {
-          u[i][j] = *(inptr_row + (j - in_pad_left)*in_col_stride);
-        }
-      }
-    }
-    uptr0++;
-
-    // Load weights tile
-    float w[kernel_rows][kernel_cols];
-    for (int i = 0; i < kernel_rows; i++)
-    {
-      const float* const wptr_row = wptr0 + i*weight_row_stride;
-      for (int j = 0; j < kernel_cols; j++)
-      {
-        w[i][j] = *(wptr_row + j*weight_col_stride);
-      }
-    }
-    wptr0++;
-
-    // Perform the convolution
-    float v[output_tile_rows][output_tile_cols];
-    for (int out_i = 0; out_i < out_cells_i; out_i++)
-    {
-      for (int out_j = 0; out_j < out_cells_j; out_j++)
-      {
-        // Clear the accumulator
-        v[out_i][out_j] = static_cast<float>(0);
-
-        // Base co-ordinate
-        const int base_i = out_i * stride_rows;
-        const int base_j = out_j * stride_cols;
-
-        // Fill the accumulator
-        for (int in_i = 0; in_i < kernel_rows; in_i++)
-        {
-          const int i = base_i + in_i;
-          for (int in_j = 0; in_j < kernel_cols; in_j++)
-          {
-            const int j = base_j + in_j;
-            v[out_i][out_j] += w[in_i][in_j] * u[i][j];
-          }
-        }
-      }
-    }
-
-    // Store the output tile
-    for (int i = 0; i < out_cells_i; i++)
-    {
-      float* const outptr_row = vptr0 + i*out_row_stride;
-      for (int j = 0; j < out_cells_j; j++)
-      {
-        *(outptr_row + j*out_col_stride) = v[i][j];
-      }
-    }
-    vptr0++;
-  }
+template <>
+template <>
+void Conv::execute_tile<ActivationFunction::ReLU6>(
+  int n_channels,
+  const void *weight_bias_ptr,
+  const float *input,
+  const unsigned int input_row_stride,
+  const unsigned int input_col_stride,
+  float *output,
+  const unsigned int output_row_stride,
+  const unsigned int output_col_stride
+)
+{
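+  // 3x3 depthwise convolution with unit stride and a fused ReLU6 activation:
+  // each step of the vector loop reads a 6x6 input tile and writes a 4x4
+  // output tile for four channels at once; a scalar loop mops up the
+  // n_channels % 4 remainder. weight_bias_ptr holds, per channel group, the
+  // bias followed by the nine kernel weights.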
+  __asm __volatile(
+    "add x24, %[inptr0], %[input_row_stride]\n"
+    "add x13, %[input_col_stride1], %[input_col_stride1]\n"
+    "add x8, %[outptr0], %[output_row_stride]\n"
+    "add x9, x24, %[input_row_stride]\n"
+    "add x10, x13, #64\n"
+    "add x19, x13, %[input_col_stride1]\n"
+    "add x20, x9, %[input_row_stride]\n"
+    "add x21, x19, #64\n"
+    "add x17, x19, %[input_col_stride1]\n"
+    "add x22, x20, %[input_row_stride]\n"
+    "add x18, x17, #64\n"
+    "add x11, x17, %[input_col_stride1]\n"
+    "add x23, x22, %[input_row_stride]\n"
+    "add x12, x11, #64\n"
+    "add x25, x8, %[output_row_stride]\n"
+    "add x26, x25, %[output_row_stride]\n"
+    "add x27, %[output_col_stride1], %[output_col_stride1]\n"
+    "and x14, %[n_channels], #3\n"
+    "add x28, x27, %[output_col_stride1]\n"
+    "lsr x15, %[n_channels], #2\n"
+    "cbz x15, 4f\n"
+    "1:\n"
+    "ldr q23, [%[wbptr]]\n"
+    "subs x15, x15, #1\n"
+    "mov v12.16b, v23.16b\n"
+    "ldr q20, [%[wbptr], #16]\n"
+    "mov v8.16b, v23.16b\n"
+    "ldr q6, [%[wbptr], #32]\n"
+    "mov v11.16b, v23.16b\n"
+    "ldr q5, [%[wbptr], #48]\n"
+    "mov v16.16b, v23.16b\n"
+    "ldr q19, [%[wbptr], #64]\n"
+    "mov v7.16b, v23.16b\n"
+    "ldr q4, [%[wbptr], #80]\n"
+    "mov v10.16b, v23.16b\n"
+    "ldr q3, [%[wbptr], #96]\n"
+    "mov v14.16b, v23.16b\n"
+    "ldr q2, [%[wbptr], #112]\n"
+    "mov v15.16b, v23.16b\n"
+    "ldr q1, [%[wbptr], #128]\n"
+    "mov v17.16b, v23.16b\n"
+    "ldr q0, [%[wbptr], #144]\n"
+    "mov v9.16b, v23.16b\n"
+    "ldr q28, [%[inptr0]]\n"
+    "fmla v12.4s, v28.4s, v20.4s\n"
+    "ldr q25, [x24]\n"
+    "fmla v8.4s, v25.4s, v20.4s\n"
+    "ldr q18, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v11.4s, v18.4s, v20.4s\n"
+    "ldr q30, [x9]\n"
+    "fmla v12.4s, v25.4s, v19.4s\n"
+    "ldr q29, [x24, %[input_col_stride1]]\n"
+    "fmla v8.4s, v30.4s, v19.4s\n"
+    "ldr q24, [%[inptr0], x13]\n"
+    "fmla v16.4s, v30.4s, v20.4s\n"
+    "ldr q27, [x20]\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "ldr q22, [x9, %[input_col_stride1]]\n"
+    "fmla v8.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v12.4s, v30.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "prfm pldl1keep, [x24, x16]\n"
+    "prfm pldl1keep, [%[inptr0], x10]\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v12.4s, v29.4s, v4.4s\n"
+    "beq 3f\n"
+    "2:\n"
+    "mov v13.16b, v23.16b\n"
+    "ldr q21, [x24, x13]\n"
+    "mov v18.16b, v23.16b\n"
+    "prfm pldl1keep, [x24, x10]\n"
+    "fmla v11.4s, v29.4s, v19.4s\n"
+    "prfm pldl1keep, [%[inptr0], x21]\n"
+    "fmla v7.4s, v29.4s, v20.4s\n"
+    "ldr q25, [%[inptr0], x19]\n"
+    "fmla v12.4s, v24.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v11.4s, v24.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v10.4s, v24.4s, v20.4s\n"
+    "ldr q24, [x22]\n"
+    "fmla v8.4s, v27.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x10]\n"
+    "fmla v16.4s, v27.4s, v19.4s\n"
+    "prfm pldl1keep, [x24, x21]\n"
+    "fmla v14.4s, v27.4s, v20.4s\n"
+    "ldr q26, [x20, %[input_col_stride1]]\n"
+    "fmla v12.4s, v22.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v8.4s, v22.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v11.4s, v22.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x10]\n"
+    "fmla v7.4s, v22.4s, v19.4s\n"
+    "prfm pldl1keep, [x9, x21]\n"
+    "fmla v15.4s, v22.4s, v20.4s\n"
+    "ldr q30, [x9, x13]\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "prfm pldl1keep, [x24, x18]\n"
+    "fmla v8.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [%[inptr0], x12]\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v7.4s, v21.4s, v6.4s\n"
+    "prfm pldl1keep, [x22, x10]\n"
+    "fmla v10.4s, v21.4s, v19.4s\n"
+    "prfm pldl1keep, [x20, x21]\n"
+    "fmla v17.4s, v21.4s, v20.4s\n"
+    "ldr q22, [x24, x19]\n"
+    "fmla v11.4s, v25.4s, v5.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v10.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x24, x12]\n"
+    "fmla v9.4s, v25.4s, v20.4s\n"
+    "ldr q21, [%[inptr0], x17]\n"
+    "fmla v16.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x10]\n"
+    "fmla v14.4s, v24.4s, v19.4s\n"
+    "ldr q24, [x23]\n"
+    "fmla v8.4s, v26.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x21]\n"
+    "fmla v16.4s, v26.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v7.4s, v26.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x12]\n"
+    "fmla v14.4s, v26.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x21]\n"
+    "fmla v15.4s, v26.4s, v19.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v13.4s, v26.4s, v20.4s\n"
+    "ldr q26, [x22, %[input_col_stride1]]\n"
+    "fmla v12.4s, v30.4s, v0.4s\n"
+    "prfm pldl1keep, [x20, x12]\n"
+    "fmla v8.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v11.4s, v30.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x12]\n"
+    "fmla v16.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x23, x12]\n"
+    "fmla v7.4s, v30.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v10.4s, v30.4s, v2.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "subs x15, x15, #1\n"
+    "fmla v17.4s, v30.4s, v19.4s\n"
+    "fmla v18.4s, v30.4s, v20.4s\n"
+    "mov v25.16b, v23.16b\n"
+    "fmla v11.4s, v22.4s, v3.4s\n"
+    "fmla v7.4s, v22.4s, v5.4s\n"
+    "fmla v10.4s, v22.4s, v4.4s\n"
+    "fmla v17.4s, v22.4s, v6.4s\n"
+    "fmla v9.4s, v22.4s, v19.4s\n"
+    "fmla v25.4s, v22.4s, v20.4s\n"
+    "ldr q27, [x20, x13]\n"
+    "fmla v10.4s, v21.4s, v5.4s\n"
+    "fmla v14.4s, v24.4s, v2.4s\n"
+    "mov v22.16b, v23.16b\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "mov v24.16b, v23.16b\n"
+    "mov v21.16b, v23.16b\n"
+    "fmla v16.4s, v26.4s, v1.4s\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "fmla v13.4s, v26.4s, v19.4s\n"
+    "fmla v8.4s, v27.4s, v0.4s\n"
+    "ldr q28, [x9, x19]\n"
+    "fmla v16.4s, v27.4s, v3.4s\n"
+    "fmla v7.4s, v27.4s, v1.4s\n"
+    "fmla v14.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "fmla v13.4s, v27.4s, v6.4s\n"
+    "fmla v18.4s, v27.4s, v19.4s\n"
+    "fmla v22.4s, v27.4s, v20.4s\n"
+    "fmla v11.4s, v28.4s, v0.4s\n"
+    "ldr q29, [x24, x17]\n"
+    "fmla v7.4s, v28.4s, v3.4s\n"
+    "fmla v10.4s, v28.4s, v1.4s\n"
+    "fmla v15.4s, v28.4s, v5.4s\n"
+    "fmla v17.4s, v28.4s, v4.4s\n"
+    "fmla v9.4s, v28.4s, v2.4s\n"
+    "fmla v18.4s, v28.4s, v6.4s\n"
+    "fmla v25.4s, v28.4s, v19.4s\n"
+    "fmla v24.4s, v28.4s, v20.4s\n"
+    "fmla v10.4s, v29.4s, v3.4s\n"
+    "ldr q23, [%[inptr0], x11]\n"
+    "fmla v17.4s, v29.4s, v5.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v9.4s, v29.4s, v4.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v25.4s, v29.4s, v6.4s\n"
+    "ldr q30, [x23, %[input_col_stride1]]\n"
+    "fmla v14.4s, v30.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v9.4s, v23.4s, v5.4s\n"
+    "ldr q23, [x22, x13]\n"
+    "fmla v13.4s, v30.4s, v2.4s\n"
+    "ldr q29, [x20, x19]\n"
+    "fmla v16.4s, v23.4s, v0.4s\n"
+    "prfm pldl1keep, [%[inptr0], x10]\n"
+    "fmla v14.4s, v23.4s, v3.4s\n"
+    "fmla v15.4s, v23.4s, v1.4s\n"
+    "fmla v13.4s, v23.4s, v4.4s\n"
+    "fmla v18.4s, v23.4s, v2.4s\n"
+    "fmla v22.4s, v23.4s, v19.4s\n"
+    "ldr q23, [x9, x17]\n"
+    "fmla v7.4s, v29.4s, v0.4s\n"
+    "fmla v15.4s, v29.4s, v3.4s\n"
+    "fmla v17.4s, v29.4s, v1.4s\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "fmla v18.4s, v29.4s, v4.4s\n"
+    "fmla v25.4s, v29.4s, v2.4s\n"
+    "fmla v22.4s, v29.4s, v6.4s\n"
+    "fmla v24.4s, v29.4s, v19.4s\n"
+    "fmla v21.4s, v29.4s, v20.4s\n"
+    "ldr q26, [x24, x11]\n"
+    "fmla v10.4s, v23.4s, v0.4s\n"
+    "ldr q28, [x23, x13]\n"
+    "fmla v17.4s, v23.4s, v3.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v9.4s, v23.4s, v1.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v18.4s, v23.4s, v5.4s\n"
+    "prfm pldl1keep, [x24, x16]\n"
+    "fmla v25.4s, v23.4s, v4.4s\n"
+    "fmla v24.4s, v23.4s, v6.4s\n"
+    "fmla v9.4s, v26.4s, v3.4s\n"
+    "ldr q20, [x22, x19]\n"
+    "fmla v14.4s, v28.4s, v0.4s\n"
+    "fmla v13.4s, v28.4s, v1.4s\n"
+    "fmla v25.4s, v26.4s, v5.4s\n"
+    "ldr q26, [x20, x17]\n"
+    "fmla v22.4s, v28.4s, v2.4s\n"
+    "ldr q23, [x9, x11]\n"
+    "fmla v15.4s, v20.4s, v0.4s\n"
+    "add x9, x9, #16\n"
+    "fmla v13.4s, v20.4s, v3.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "fmla v18.4s, v20.4s, v1.4s\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v22.4s, v20.4s, v4.4s\n"
+    "fmla v24.4s, v20.4s, v2.4s\n"
+    "fmla v21.4s, v20.4s, v19.4s\n"
+    "ldr q27, [x23, x19]\n"
+    "fmla v17.4s, v26.4s, v0.4s\n"
+    "ldr q20, [x22, x17]\n"
+    "fmla v18.4s, v26.4s, v3.4s\n"
+    "fmla v25.4s, v26.4s, v1.4s\n"
+    "fmla v22.4s, v26.4s, v5.4s\n"
+    "fmla v24.4s, v26.4s, v4.4s\n"
+    "fmla v21.4s, v26.4s, v6.4s\n"
+    "ldr q19, [x20, x11]\n"
+    "fmla v9.4s, v23.4s, v0.4s\n"
+    "ldr q28, [x23, x17]\n"
+    "fmla v25.4s, v23.4s, v3.4s\n"
+    "add x20, x20, #16\n"
+    "fmla v24.4s, v23.4s, v5.4s\n"
+    "ldr q29, [x22, x11]\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "fmla v22.4s, v27.4s, v1.4s\n"
+    "add x22, x22, #16\n"
+    "fmla v21.4s, v27.4s, v2.4s\n"
+    "ldr q30, [x23, x11]\n"
+    "fmla v18.4s, v20.4s, v0.4s\n"
+    "ldr q23, [%[wbptr]]\n"
+    "fmla v22.4s, v20.4s, v3.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v24.4s, v20.4s, v1.4s\n"
+    "fmla v21.4s, v20.4s, v4.4s\n"
+    "fmla v25.4s, v19.4s, v0.4s\n"
+    "ldr q20, [%[wbptr], #16]\n"
+    "fmla v22.4s, v28.4s, v0.4s\n"
+    "ldr q6, [%[wbptr], #32]\n"
+    "fmla v21.4s, v19.4s, v5.4s\n"
+    "movi v26.16b, #0\n"
+    "fmla v24.4s, v19.4s, v3.4s\n"
+    "ldr q19, [%[wbptr], #64]\n"
+    "fmax v12.4s, v12.4s, v26.4s\n"
+    "fmax v11.4s, v11.4s, v26.4s\n"
+    "fmla v21.4s, v28.4s, v1.4s\n"
+    "ldr q5, [%[wbptr], #48]\n"
+    "fmla v24.4s, v29.4s, v0.4s\n"
+    "ldr q4, [%[wbptr], #80]\n"
+    "fmax v10.4s, v10.4s, v26.4s\n"
+    "fmax v9.4s, v9.4s, v26.4s\n"
+    "fmla v21.4s, v29.4s, v3.4s\n"
+    "ldr q2, [%[wbptr], #112]\n"
+    "fmov v27.4s, #6.0\n"
+    "fmax v8.4s, v8.4s, v26.4s\n"
+    "fmax v7.4s, v7.4s, v26.4s\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "fmla v21.4s, v30.4s, v0.4s\n"
+    "ldr q3, [%[wbptr], #96]\n"
+    "fmin v12.4s, v12.4s, v27.4s\n"
+    "ldr q1, [%[wbptr], #128]\n"
+    "fmin v11.4s, v11.4s, v27.4s\n"
+    "fmin v10.4s, v10.4s, v27.4s\n"
+    "str q12, [%[outptr0]]\n"
+    "fmin v9.4s, v9.4s, v27.4s\n"
+    "str q11, [%[outptr0], %[output_col_stride1]]\n"
+    "fmin v8.4s, v8.4s, v27.4s\n"
+    "str q10, [%[outptr0], x27]\n"
+    "fmin v7.4s, v7.4s, v27.4s\n"
+    "str q9, [%[outptr0], x28]\n"
+    "fmin v17.4s, v17.4s, v27.4s\n"
+    "str q8, [x8]\n"
+    "fmax v25.4s, v25.4s, v26.4s\n"
+    "str q7, [x8, %[output_col_stride1]]\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "str q17, [x8, x27]\n"
+    "fmin v25.4s, v25.4s, v27.4s\n"
+    "fmin v16.4s, v16.4s, v27.4s\n"
+    "ldr q0, [%[wbptr], #144]\n"
+    "str q25, [x8, x28]\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "str q16, [x25]\n"
+    "fmax v18.4s, v18.4s, v26.4s\n"
+    "fmin v15.4s, v15.4s, v27.4s\n"
+    "ldr q28, [%[inptr0]]\n"
+    "fmin v18.4s, v18.4s, v27.4s\n"
+    "ldr q25, [x24]\n"
+    "str q15, [x25, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v26.4s\n"
+    "str q18, [x25, x27]\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "fmin v24.4s, v24.4s, v27.4s\n"
+    "ldr q18, [%[inptr0], %[input_col_stride1]]\n"
+    "fmin v14.4s, v14.4s, v27.4s\n"
+    "ldr q30, [x9]\n"
+    "str q24, [x25, x28]\n"
+    "fmax v13.4s, v13.4s, v26.4s\n"
+    "str q14, [x26]\n"
+    "fmax v22.4s, v22.4s, v26.4s\n"
+    "fmin v13.4s, v13.4s, v27.4s\n"
+    "ldr q29, [x24, %[input_col_stride1]]\n"
+    "fmin v22.4s, v22.4s, v27.4s\n"
+    "ldr q24, [%[inptr0], x13]\n"
+    "str q13, [x26, %[output_col_stride1]]\n"
+    "fmax v21.4s, v21.4s, v26.4s\n"
+    "str q22, [x26, x27]\n"
+    "mov v12.16b, v23.16b\n"
+    "fmin v21.4s, v21.4s, v27.4s\n"
+    "ldr q27, [x20]\n"
+    "mov v8.16b, v23.16b\n"
+    "ldr q22, [x9, %[input_col_stride1]]\n"
+    "str q21, [x26, x28]\n"
+    "mov v11.16b, v23.16b\n"
+    "mov v16.16b, v23.16b\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "mov v7.16b, v23.16b\n"
+    "add x8, x8, #16\n"
+    "mov v10.16b, v23.16b\n"
+    "add x25, x25, #16\n"
+    "mov v14.16b, v23.16b\n"
+    "add x26, x26, #16\n"
+    "mov v15.16b, v23.16b\n"
+    "mov v17.16b, v23.16b\n"
+    "mov v9.16b, v23.16b\n"
+    "fmla v12.4s, v28.4s, v20.4s\n"
+    "fmla v8.4s, v25.4s, v20.4s\n"
+    "fmla v11.4s, v18.4s, v20.4s\n"
+    "fmla v16.4s, v30.4s, v20.4s\n"
+    "fmla v12.4s, v25.4s, v19.4s\n"
+    "fmla v8.4s, v30.4s, v19.4s\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "fmla v8.4s, v29.4s, v6.4s\n"
+    "fmla v12.4s, v30.4s, v2.4s\n"
+    "fmla v12.4s, v29.4s, v4.4s\n"
+    "bne 2b\n"
+    "3:\n"
+    "mov v13.16b, v23.16b\n"
+    "ldr q21, [x24, x13]\n"
+    "mov v18.16b, v23.16b\n"
+    "prfm pldl1keep, [x24, x10]\n"
+    "fmla v11.4s, v29.4s, v19.4s\n"
+    "prfm pldl1keep, [%[inptr0], x21]\n"
+    "fmla v7.4s, v29.4s, v20.4s\n"
+    "ldr q25, [%[inptr0], x19]\n"
+    "fmla v12.4s, v24.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v11.4s, v24.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v10.4s, v24.4s, v20.4s\n"
+    "ldr q24, [x22]\n"
+    "fmla v8.4s, v27.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x10]\n"
+    "fmla v16.4s, v27.4s, v19.4s\n"
+    "prfm pldl1keep, [x24, x21]\n"
+    "fmla v14.4s, v27.4s, v20.4s\n"
+    "ldr q26, [x20, %[input_col_stride1]]\n"
+    "fmla v12.4s, v22.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v8.4s, v22.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v11.4s, v22.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x10]\n"
+    "fmla v7.4s, v22.4s, v19.4s\n"
+    "prfm pldl1keep, [x9, x21]\n"
+    "fmla v15.4s, v22.4s, v20.4s\n"
+    "ldr q30, [x9, x13]\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "prfm pldl1keep, [x24, x18]\n"
+    "fmla v8.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [%[inptr0], x12]\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v7.4s, v21.4s, v6.4s\n"
+    "prfm pldl1keep, [x22, x10]\n"
+    "fmla v10.4s, v21.4s, v19.4s\n"
+    "prfm pldl1keep, [x20, x21]\n"
+    "fmla v17.4s, v21.4s, v20.4s\n"
+    "ldr q22, [x24, x19]\n"
+    "fmla v11.4s, v25.4s, v5.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v10.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x24, x12]\n"
+    "fmla v9.4s, v25.4s, v20.4s\n"
+    "ldr q21, [%[inptr0], x17]\n"
+    "fmla v16.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x10]\n"
+    "fmla v14.4s, v24.4s, v19.4s\n"
+    "ldr q24, [x23]\n"
+    "fmla v8.4s, v26.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x21]\n"
+    "fmla v16.4s, v26.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v7.4s, v26.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x12]\n"
+    "fmla v14.4s, v26.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x21]\n"
+    "fmla v15.4s, v26.4s, v19.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v13.4s, v26.4s, v20.4s\n"
+    "ldr q26, [x22, %[input_col_stride1]]\n"
+    "fmla v12.4s, v30.4s, v0.4s\n"
+    "prfm pldl1keep, [x20, x12]\n"
+    "fmla v8.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v11.4s, v30.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x12]\n"
+    "fmla v16.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x23, x12]\n"
+    "fmla v7.4s, v30.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #160\n"
+    "fmla v10.4s, v30.4s, v2.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "fmla v17.4s, v30.4s, v19.4s\n"
+    "fmla v18.4s, v30.4s, v20.4s\n"
+    "ldr q27, [x20, x13]\n"
+    "fmla v11.4s, v22.4s, v3.4s\n"
+    "fmla v7.4s, v22.4s, v5.4s\n"
+    "fmla v10.4s, v22.4s, v4.4s\n"
+    "fmla v17.4s, v22.4s, v6.4s\n"
+    "fmla v9.4s, v22.4s, v19.4s\n"
+    "fmla v14.4s, v24.4s, v2.4s\n"
+    "mov v25.16b, v23.16b\n"
+    "fmla v16.4s, v26.4s, v1.4s\n"
+    "fmla v10.4s, v21.4s, v5.4s\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "fmla v25.4s, v22.4s, v20.4s\n"
+    "ldr q28, [x9, x19]\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "ldr q29, [x24, x17]\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "fmla v13.4s, v26.4s, v19.4s\n"
+    "mov v22.16b, v23.16b\n"
+    "fmla v8.4s, v27.4s, v0.4s\n"
+    "fmla v16.4s, v27.4s, v3.4s\n"
+    "fmla v7.4s, v27.4s, v1.4s\n"
+    "fmla v14.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "fmla v13.4s, v27.4s, v6.4s\n"
+    "fmla v18.4s, v27.4s, v19.4s\n"
+    "fmla v22.4s, v27.4s, v20.4s\n"
+    "mov v24.16b, v23.16b\n"
+    "mov v21.16b, v23.16b\n"
+    "fmla v11.4s, v28.4s, v0.4s\n"
+    "fmla v7.4s, v28.4s, v3.4s\n"
+    "fmla v10.4s, v28.4s, v1.4s\n"
+    "fmla v15.4s, v28.4s, v5.4s\n"
+    "fmla v17.4s, v28.4s, v4.4s\n"
+    "fmla v9.4s, v28.4s, v2.4s\n"
+    "fmla v18.4s, v28.4s, v6.4s\n"
+    "fmla v25.4s, v28.4s, v19.4s\n"
+    "fmla v24.4s, v28.4s, v20.4s\n"
+    "ldr q23, [%[inptr0], x11]\n"
+    "fmla v10.4s, v29.4s, v3.4s\n"
+    "add %[inptr0], %[inptr0], #16\n"
+    "fmla v17.4s, v29.4s, v5.4s\n"
+    "fmla v9.4s, v29.4s, v4.4s\n"
+    "fmla v25.4s, v29.4s, v6.4s\n"
+    "ldr q30, [x23, %[input_col_stride1]]\n"
+    "fmla v14.4s, v30.4s, v1.4s\n"
+    "fmla v13.4s, v30.4s, v2.4s\n"
+    "fmla v9.4s, v23.4s, v5.4s\n"
+    "ldr q23, [x22, x13]\n"
+    "fmla v16.4s, v23.4s, v0.4s\n"
+    "ldr q29, [x20, x19]\n"
+    "fmla v14.4s, v23.4s, v3.4s\n"
+    "fmla v15.4s, v23.4s, v1.4s\n"
+    "fmla v13.4s, v23.4s, v4.4s\n"
+    "fmla v18.4s, v23.4s, v2.4s\n"
+    "fmla v22.4s, v23.4s, v19.4s\n"
+    "ldr q23, [x9, x17]\n"
+    "fmla v7.4s, v29.4s, v0.4s\n"
+    "fmla v15.4s, v29.4s, v3.4s\n"
+    "fmla v17.4s, v29.4s, v1.4s\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "fmla v18.4s, v29.4s, v4.4s\n"
+    "fmla v25.4s, v29.4s, v2.4s\n"
+    "fmla v22.4s, v29.4s, v6.4s\n"
+    "fmla v24.4s, v29.4s, v19.4s\n"
+    "fmla v21.4s, v29.4s, v20.4s\n"
+    "ldr q26, [x24, x11]\n"
+    "fmla v10.4s, v23.4s, v0.4s\n"
+    "ldr q28, [x23, x13]\n"
+    "fmla v17.4s, v23.4s, v3.4s\n"
+    "add x24, x24, #16\n"
+    "fmla v9.4s, v23.4s, v1.4s\n"
+    "fmla v18.4s, v23.4s, v5.4s\n"
+    "fmla v25.4s, v23.4s, v4.4s\n"
+    "fmla v24.4s, v23.4s, v6.4s\n"
+    "fmla v14.4s, v28.4s, v0.4s\n"
+    "ldr q20, [x22, x19]\n"
+    "fmla v9.4s, v26.4s, v3.4s\n"
+    "fmla v13.4s, v28.4s, v1.4s\n"
+    "fmla v25.4s, v26.4s, v5.4s\n"
+    "ldr q26, [x20, x17]\n"
+    "fmla v22.4s, v28.4s, v2.4s\n"
+    "ldr q23, [x9, x11]\n"
+    "fmla v15.4s, v20.4s, v0.4s\n"
+    "add x9, x9, #16\n"
+    "fmla v13.4s, v20.4s, v3.4s\n"
+    "fmla v18.4s, v20.4s, v1.4s\n"
+    "fmla v22.4s, v20.4s, v4.4s\n"
+    "fmla v24.4s, v20.4s, v2.4s\n"
+    "fmla v21.4s, v20.4s, v19.4s\n"
+    "ldr q27, [x23, x19]\n"
+    "fmla v17.4s, v26.4s, v0.4s\n"
+    "ldr q20, [x22, x17]\n"
+    "fmla v18.4s, v26.4s, v3.4s\n"
+    "fmla v25.4s, v26.4s, v1.4s\n"
+    "fmla v22.4s, v26.4s, v5.4s\n"
+    "fmla v24.4s, v26.4s, v4.4s\n"
+    "fmla v21.4s, v26.4s, v6.4s\n"
+    "ldr q19, [x20, x11]\n"
+    "fmla v9.4s, v23.4s, v0.4s\n"
+    "ldr q28, [x23, x17]\n"
+    "fmla v25.4s, v23.4s, v3.4s\n"
+    "add x20, x20, #16\n"
+    "fmla v24.4s, v23.4s, v5.4s\n"
+    "ldr q29, [x22, x11]\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "add x22, x22, #16\n"
+    "fmla v22.4s, v27.4s, v1.4s\n"
+    "fmla v21.4s, v27.4s, v2.4s\n"
+    "fmla v18.4s, v20.4s, v0.4s\n"
+    "ldr q30, [x23, x11]\n"
+    "fmla v24.4s, v20.4s, v1.4s\n"
+    "add x23, x23, #16\n"
+    "fmla v22.4s, v20.4s, v3.4s\n"
+    "fmla v21.4s, v20.4s, v4.4s\n"
+    "fmla v25.4s, v19.4s, v0.4s\n"
+    "movi v26.16b, #0\n"
+    "fmla v24.4s, v19.4s, v3.4s\n"
+    "fmov v27.4s, #6.0\n"
+    "fmla v21.4s, v19.4s, v5.4s\n"
+    "fmla v22.4s, v28.4s, v0.4s\n"
+    "fmax v12.4s, v12.4s, v26.4s\n"
+    "fmax v11.4s, v11.4s, v26.4s\n"
+    "fmla v24.4s, v29.4s, v0.4s\n"
+    "fmax v10.4s, v10.4s, v26.4s\n"
+    "fmla v21.4s, v28.4s, v1.4s\n"
+    "fmin v12.4s, v12.4s, v27.4s\n"
+    "fmin v11.4s, v11.4s, v27.4s\n"
+    "fmin v10.4s, v10.4s, v27.4s\n"
+    "str q12, [%[outptr0]]\n"
+    "fmax v9.4s, v9.4s, v26.4s\n"
+    "str q11, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v21.4s, v29.4s, v3.4s\n"
+    "str q10, [%[outptr0], x27]\n"
+    "fmin v9.4s, v9.4s, v27.4s\n"
+    "fmax v8.4s, v8.4s, v26.4s\n"
+    "fmax v7.4s, v7.4s, v26.4s\n"
+    "str q9, [%[outptr0], x28]\n"
+    "fmla v21.4s, v30.4s, v0.4s\n"
+    "fmin v8.4s, v8.4s, v27.4s\n"
+    "add %[outptr0], %[outptr0], #16\n"
+    "fmin v7.4s, v7.4s, v27.4s\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "str q8, [x8]\n"
+    "fmax v25.4s, v25.4s, v26.4s\n"
+    "str q7, [x8, %[output_col_stride1]]\n"
+    "fmin v17.4s, v17.4s, v27.4s\n"
+    "fmin v25.4s, v25.4s, v27.4s\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "str q17, [x8, x27]\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "str q25, [x8, x28]\n"
+    "fmin v16.4s, v16.4s, v27.4s\n"
+    "fmin v15.4s, v15.4s, v27.4s\n"
+    "add x8, x8, #16\n"
+    "str q16, [x25]\n"
+    "fmax v18.4s, v18.4s, v26.4s\n"
+    "str q15, [x25, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v26.4s\n"
+    "fmin v18.4s, v18.4s, v27.4s\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "fmin v24.4s, v24.4s, v27.4s\n"
+    "fmax v13.4s, v13.4s, v26.4s\n"
+    "str q18, [x25, x27]\n"
+    "fmin v14.4s, v14.4s, v27.4s\n"
+    "str q24, [x25, x28]\n"
+    "fmin v13.4s, v13.4s, v27.4s\n"
+    "str q14, [x26]\n"
+    "fmax v22.4s, v22.4s, v26.4s\n"
+    "str q13, [x26, %[output_col_stride1]]\n"
+    "fmax v21.4s, v21.4s, v26.4s\n"
+    "fmin v22.4s, v22.4s, v27.4s\n"
+    "add x25, x25, #16\n"
+    "fmin v21.4s, v21.4s, v27.4s\n"
+    "str q22, [x26, x27]\n"
+    "str q21, [x26, x28]\n"
+    "add x26, x26, #16\n"
+    "4:\n"
+    "cbz x14, 7f\n"
+    "ldr s23, [%[wbptr]]\n"
+    "mov v12.16b, v23.16b\n"
+    "ldr s20, [%[wbptr], #4]\n"
+    "mov v8.16b, v23.16b\n"
+    "ldr s6, [%[wbptr], #8]\n"
+    "mov v11.16b, v23.16b\n"
+    "ldr s5, [%[wbptr], #12]\n"
+    "mov v16.16b, v23.16b\n"
+    "ldr s19, [%[wbptr], #16]\n"
+    "mov v7.16b, v23.16b\n"
+    "ldr s4, [%[wbptr], #20]\n"
+    "mov v10.16b, v23.16b\n"
+    "ldr s3, [%[wbptr], #24]\n"
+    "mov v14.16b, v23.16b\n"
+    "ldr s2, [%[wbptr], #28]\n"
+    "mov v15.16b, v23.16b\n"
+    "ldr s1, [%[wbptr], #32]\n"
+    "mov v17.16b, v23.16b\n"
+    "ldr s0, [%[wbptr], #36]\n"
+    "mov v9.16b, v23.16b\n"
+    "ldr s28, [%[inptr0]]\n"
+    "fmla v12.4s, v28.4s, v20.4s\n"
+    "ldr s25, [x24]\n"
+    "fmla v8.4s, v25.4s, v20.4s\n"
+    "ldr s18, [%[inptr0], %[input_col_stride1]]\n"
+    "fmla v11.4s, v18.4s, v20.4s\n"
+    "ldr s30, [x9]\n"
+    "fmla v12.4s, v25.4s, v19.4s\n"
+    "ldr s29, [x24, %[input_col_stride1]]\n"
+    "fmla v8.4s, v30.4s, v19.4s\n"
+    "ldr s24, [%[inptr0], x13]\n"
+    "fmla v16.4s, v30.4s, v20.4s\n"
+    "ldr s27, [x20]\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "ldr s22, [x9, %[input_col_stride1]]\n"
+    "fmla v8.4s, v29.4s, v6.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "subs x14, x14, #1\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "fmla v12.4s, v30.4s, v2.4s\n"
+    "prfm pldl1keep, [x24, x16]\n"
+    "prfm pldl1keep, [%[inptr0], x10]\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v12.4s, v29.4s, v4.4s\n"
+    "beq 6f\n"
+    "5:\n"
+    "mov v13.16b, v23.16b\n"
+    "ldr s21, [x24, x13]\n"
+    "mov v18.16b, v23.16b\n"
+    "prfm pldl1keep, [x24, x10]\n"
+    "fmla v11.4s, v29.4s, v19.4s\n"
+    "prfm pldl1keep, [%[inptr0], x21]\n"
+    "fmla v7.4s, v29.4s, v20.4s\n"
+    "ldr s25, [%[inptr0], x19]\n"
+    "fmla v12.4s, v24.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v11.4s, v24.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v10.4s, v24.4s, v20.4s\n"
+    "ldr s24, [x22]\n"
+    "fmla v8.4s, v27.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x10]\n"
+    "fmla v16.4s, v27.4s, v19.4s\n"
+    "prfm pldl1keep, [x24, x21]\n"
+    "fmla v14.4s, v27.4s, v20.4s\n"
+    "ldr s26, [x20, %[input_col_stride1]]\n"
+    "fmla v12.4s, v22.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v8.4s, v22.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v11.4s, v22.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x10]\n"
+    "fmla v7.4s, v22.4s, v19.4s\n"
+    "prfm pldl1keep, [x9, x21]\n"
+    "fmla v15.4s, v22.4s, v20.4s\n"
+    "ldr s30, [x9, x13]\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "prfm pldl1keep, [x24, x18]\n"
+    "fmla v8.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [%[inptr0], x12]\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v7.4s, v21.4s, v6.4s\n"
+    "prfm pldl1keep, [x22, x10]\n"
+    "fmla v10.4s, v21.4s, v19.4s\n"
+    "prfm pldl1keep, [x20, x21]\n"
+    "fmla v17.4s, v21.4s, v20.4s\n"
+    "ldr s22, [x24, x19]\n"
+    "fmla v11.4s, v25.4s, v5.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v10.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x24, x12]\n"
+    "fmla v9.4s, v25.4s, v20.4s\n"
+    "ldr s21, [%[inptr0], x17]\n"
+    "fmla v16.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x10]\n"
+    "fmla v14.4s, v24.4s, v19.4s\n"
+    "ldr s24, [x23]\n"
+    "fmla v8.4s, v26.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x21]\n"
+    "fmla v16.4s, v26.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v7.4s, v26.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x12]\n"
+    "fmla v14.4s, v26.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x21]\n"
+    "fmla v15.4s, v26.4s, v19.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v13.4s, v26.4s, v20.4s\n"
+    "ldr s26, [x22, %[input_col_stride1]]\n"
+    "fmla v12.4s, v30.4s, v0.4s\n"
+    "prfm pldl1keep, [x20, x12]\n"
+    "fmla v8.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v11.4s, v30.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x12]\n"
+    "fmla v16.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x23, x12]\n"
+    "fmla v7.4s, v30.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v10.4s, v30.4s, v2.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "subs x14, x14, #1\n"
+    "fmla v17.4s, v30.4s, v19.4s\n"
+    "fmla v18.4s, v30.4s, v20.4s\n"
+    "mov v25.16b, v23.16b\n"
+    "fmla v11.4s, v22.4s, v3.4s\n"
+    "fmla v7.4s, v22.4s, v5.4s\n"
+    "fmla v10.4s, v22.4s, v4.4s\n"
+    "fmla v17.4s, v22.4s, v6.4s\n"
+    "fmla v9.4s, v22.4s, v19.4s\n"
+    "fmla v25.4s, v22.4s, v20.4s\n"
+    "ldr s27, [x20, x13]\n"
+    "fmla v10.4s, v21.4s, v5.4s\n"
+    "fmla v14.4s, v24.4s, v2.4s\n"
+    "mov v22.16b, v23.16b\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "mov v24.16b, v23.16b\n"
+    "mov v21.16b, v23.16b\n"
+    "fmla v16.4s, v26.4s, v1.4s\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "fmla v13.4s, v26.4s, v19.4s\n"
+    "fmla v8.4s, v27.4s, v0.4s\n"
+    "ldr s28, [x9, x19]\n"
+    "fmla v16.4s, v27.4s, v3.4s\n"
+    "fmla v7.4s, v27.4s, v1.4s\n"
+    "fmla v14.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "fmla v13.4s, v27.4s, v6.4s\n"
+    "fmla v18.4s, v27.4s, v19.4s\n"
+    "fmla v22.4s, v27.4s, v20.4s\n"
+    "fmla v11.4s, v28.4s, v0.4s\n"
+    "ldr s29, [x24, x17]\n"
+    "fmla v7.4s, v28.4s, v3.4s\n"
+    "fmla v10.4s, v28.4s, v1.4s\n"
+    "fmla v15.4s, v28.4s, v5.4s\n"
+    "fmla v17.4s, v28.4s, v4.4s\n"
+    "fmla v9.4s, v28.4s, v2.4s\n"
+    "fmla v18.4s, v28.4s, v6.4s\n"
+    "fmla v25.4s, v28.4s, v19.4s\n"
+    "fmla v24.4s, v28.4s, v20.4s\n"
+    "fmla v10.4s, v29.4s, v3.4s\n"
+    "ldr s23, [%[inptr0], x11]\n"
+    "fmla v17.4s, v29.4s, v5.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v9.4s, v29.4s, v4.4s\n"
+    "prfm pldl1keep, [%[inptr0], #64]\n"
+    "fmla v25.4s, v29.4s, v6.4s\n"
+    "ldr s30, [x23, %[input_col_stride1]]\n"
+    "fmla v14.4s, v30.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], x16]\n"
+    "fmla v9.4s, v23.4s, v5.4s\n"
+    "ldr s23, [x22, x13]\n"
+    "fmla v13.4s, v30.4s, v2.4s\n"
+    "ldr s29, [x20, x19]\n"
+    "fmla v16.4s, v23.4s, v0.4s\n"
+    "prfm pldl1keep, [%[inptr0], x10]\n"
+    "fmla v14.4s, v23.4s, v3.4s\n"
+    "fmla v15.4s, v23.4s, v1.4s\n"
+    "fmla v13.4s, v23.4s, v4.4s\n"
+    "fmla v18.4s, v23.4s, v2.4s\n"
+    "fmla v22.4s, v23.4s, v19.4s\n"
+    "ldr s23, [x9, x17]\n"
+    "fmla v7.4s, v29.4s, v0.4s\n"
+    "fmla v15.4s, v29.4s, v3.4s\n"
+    "fmla v17.4s, v29.4s, v1.4s\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "fmla v18.4s, v29.4s, v4.4s\n"
+    "fmla v25.4s, v29.4s, v2.4s\n"
+    "fmla v22.4s, v29.4s, v6.4s\n"
+    "fmla v24.4s, v29.4s, v19.4s\n"
+    "fmla v21.4s, v29.4s, v20.4s\n"
+    "ldr s26, [x24, x11]\n"
+    "fmla v10.4s, v23.4s, v0.4s\n"
+    "ldr s28, [x23, x13]\n"
+    "fmla v17.4s, v23.4s, v3.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v9.4s, v23.4s, v1.4s\n"
+    "prfm pldl1keep, [x24, #64]\n"
+    "fmla v18.4s, v23.4s, v5.4s\n"
+    "prfm pldl1keep, [x24, x16]\n"
+    "fmla v25.4s, v23.4s, v4.4s\n"
+    "fmla v24.4s, v23.4s, v6.4s\n"
+    "fmla v9.4s, v26.4s, v3.4s\n"
+    "ldr s20, [x22, x19]\n"
+    "fmla v14.4s, v28.4s, v0.4s\n"
+    "fmla v13.4s, v28.4s, v1.4s\n"
+    "fmla v25.4s, v26.4s, v5.4s\n"
+    "ldr s26, [x20, x17]\n"
+    "fmla v22.4s, v28.4s, v2.4s\n"
+    "ldr s23, [x9, x11]\n"
+    "fmla v15.4s, v20.4s, v0.4s\n"
+    "add x9, x9, #4\n"
+    "fmla v13.4s, v20.4s, v3.4s\n"
+    "prfm pldl1keep, [x9, #64]\n"
+    "fmla v18.4s, v20.4s, v1.4s\n"
+    "prfm pldl1keep, [x9, x16]\n"
+    "fmla v22.4s, v20.4s, v4.4s\n"
+    "fmla v24.4s, v20.4s, v2.4s\n"
+    "fmla v21.4s, v20.4s, v19.4s\n"
+    "ldr s27, [x23, x19]\n"
+    "fmla v17.4s, v26.4s, v0.4s\n"
+    "ldr s20, [x22, x17]\n"
+    "fmla v18.4s, v26.4s, v3.4s\n"
+    "fmla v25.4s, v26.4s, v1.4s\n"
+    "fmla v22.4s, v26.4s, v5.4s\n"
+    "fmla v24.4s, v26.4s, v4.4s\n"
+    "fmla v21.4s, v26.4s, v6.4s\n"
+    "ldr s19, [x20, x11]\n"
+    "fmla v9.4s, v23.4s, v0.4s\n"
+    "ldr s28, [x23, x17]\n"
+    "fmla v25.4s, v23.4s, v3.4s\n"
+    "add x20, x20, #4\n"
+    "fmla v24.4s, v23.4s, v5.4s\n"
+    "ldr s29, [x22, x11]\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "prfm pldl1keep, [x20, #64]\n"
+    "fmla v22.4s, v27.4s, v1.4s\n"
+    "add x22, x22, #4\n"
+    "fmla v21.4s, v27.4s, v2.4s\n"
+    "ldr s30, [x23, x11]\n"
+    "fmla v18.4s, v20.4s, v0.4s\n"
+    "ldr s23, [%[wbptr]]\n"
+    "fmla v22.4s, v20.4s, v3.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v24.4s, v20.4s, v1.4s\n"
+    "fmla v21.4s, v20.4s, v4.4s\n"
+    "fmla v25.4s, v19.4s, v0.4s\n"
+    "ldr s20, [%[wbptr], #4]\n"
+    "fmla v22.4s, v28.4s, v0.4s\n"
+    "ldr s6, [%[wbptr], #8]\n"
+    "fmla v21.4s, v19.4s, v5.4s\n"
+    "movi v26.16b, #0\n"
+    "fmla v24.4s, v19.4s, v3.4s\n"
+    "ldr s19, [%[wbptr], #16]\n"
+    "fmax v12.4s, v12.4s, v26.4s\n"
+    "fmax v11.4s, v11.4s, v26.4s\n"
+    "fmla v21.4s, v28.4s, v1.4s\n"
+    "ldr s5, [%[wbptr], #12]\n"
+    "fmla v24.4s, v29.4s, v0.4s\n"
+    "ldr s4, [%[wbptr], #20]\n"
+    "fmax v10.4s, v10.4s, v26.4s\n"
+    "fmax v9.4s, v9.4s, v26.4s\n"
+    "fmla v21.4s, v29.4s, v3.4s\n"
+    "ldr s2, [%[wbptr], #28]\n"
+    "fmov v27.4s, #6.0\n"
+    "fmax v8.4s, v8.4s, v26.4s\n"
+    "fmax v7.4s, v7.4s, v26.4s\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "fmla v21.4s, v30.4s, v0.4s\n"
+    "ldr s3, [%[wbptr], #24]\n"
+    "fmin v12.4s, v12.4s, v27.4s\n"
+    "ldr s1, [%[wbptr], #32]\n"
+    "fmin v11.4s, v11.4s, v27.4s\n"
+    "fmin v10.4s, v10.4s, v27.4s\n"
+    "str s12, [%[outptr0]]\n"
+    "fmin v9.4s, v9.4s, v27.4s\n"
+    "str s11, [%[outptr0], %[output_col_stride1]]\n"
+    "fmin v8.4s, v8.4s, v27.4s\n"
+    "str s10, [%[outptr0], x27]\n"
+    "fmin v7.4s, v7.4s, v27.4s\n"
+    "str s9, [%[outptr0], x28]\n"
+    "fmin v17.4s, v17.4s, v27.4s\n"
+    "str s8, [x8]\n"
+    "fmax v25.4s, v25.4s, v26.4s\n"
+    "str s7, [x8, %[output_col_stride1]]\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "str s17, [x8, x27]\n"
+    "fmin v25.4s, v25.4s, v27.4s\n"
+    "fmin v16.4s, v16.4s, v27.4s\n"
+    "ldr s0, [%[wbptr], #36]\n"
+    "str s25, [x8, x28]\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "str s16, [x25]\n"
+    "fmax v18.4s, v18.4s, v26.4s\n"
+    "fmin v15.4s, v15.4s, v27.4s\n"
+    "ldr s28, [%[inptr0]]\n"
+    "fmin v18.4s, v18.4s, v27.4s\n"
+    "ldr s25, [x24]\n"
+    "str s15, [x25, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v26.4s\n"
+    "str s18, [x25, x27]\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "fmin v24.4s, v24.4s, v27.4s\n"
+    "ldr s18, [%[inptr0], %[input_col_stride1]]\n"
+    "fmin v14.4s, v14.4s, v27.4s\n"
+    "ldr s30, [x9]\n"
+    "str s24, [x25, x28]\n"
+    "fmax v13.4s, v13.4s, v26.4s\n"
+    "str s14, [x26]\n"
+    "fmax v22.4s, v22.4s, v26.4s\n"
+    "fmin v13.4s, v13.4s, v27.4s\n"
+    "ldr s29, [x24, %[input_col_stride1]]\n"
+    "fmin v22.4s, v22.4s, v27.4s\n"
+    "ldr s24, [%[inptr0], x13]\n"
+    "str s13, [x26, %[output_col_stride1]]\n"
+    "fmax v21.4s, v21.4s, v26.4s\n"
+    "str s22, [x26, x27]\n"
+    "mov v12.16b, v23.16b\n"
+    "fmin v21.4s, v21.4s, v27.4s\n"
+    "ldr s27, [x20]\n"
+    "mov v8.16b, v23.16b\n"
+    "ldr s22, [x9, %[input_col_stride1]]\n"
+    "str s21, [x26, x28]\n"
+    "mov v11.16b, v23.16b\n"
+    "mov v16.16b, v23.16b\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "mov v7.16b, v23.16b\n"
+    "add x8, x8, #4\n"
+    "mov v10.16b, v23.16b\n"
+    "add x25, x25, #4\n"
+    "mov v14.16b, v23.16b\n"
+    "add x26, x26, #4\n"
+    "mov v15.16b, v23.16b\n"
+    "mov v17.16b, v23.16b\n"
+    "mov v9.16b, v23.16b\n"
+    "fmla v12.4s, v28.4s, v20.4s\n"
+    "fmla v8.4s, v25.4s, v20.4s\n"
+    "fmla v11.4s, v18.4s, v20.4s\n"
+    "fmla v16.4s, v30.4s, v20.4s\n"
+    "fmla v12.4s, v25.4s, v19.4s\n"
+    "fmla v8.4s, v30.4s, v19.4s\n"
+    "fmla v12.4s, v18.4s, v6.4s\n"
+    "fmla v8.4s, v29.4s, v6.4s\n"
+    "fmla v12.4s, v30.4s, v2.4s\n"
+    "fmla v12.4s, v29.4s, v4.4s\n"
+    "bne 5b\n"
+    "6:\n"
+    "mov v13.16b, v23.16b\n"
+    "ldr s21, [x24, x13]\n"
+    "mov v18.16b, v23.16b\n"
+    "prfm pldl1keep, [x24, x10]\n"
+    "fmla v11.4s, v29.4s, v19.4s\n"
+    "prfm pldl1keep, [%[inptr0], x21]\n"
+    "fmla v7.4s, v29.4s, v20.4s\n"
+    "ldr s25, [%[inptr0], x19]\n"
+    "fmla v12.4s, v24.4s, v5.4s\n"
+    "prfm pldl1keep, [x22, #64]\n"
+    "fmla v11.4s, v24.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x16]\n"
+    "fmla v10.4s, v24.4s, v20.4s\n"
+    "ldr s24, [x22]\n"
+    "fmla v8.4s, v27.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x10]\n"
+    "fmla v16.4s, v27.4s, v19.4s\n"
+    "prfm pldl1keep, [x24, x21]\n"
+    "fmla v14.4s, v27.4s, v20.4s\n"
+    "ldr s26, [x20, %[input_col_stride1]]\n"
+    "fmla v12.4s, v22.4s, v1.4s\n"
+    "prfm pldl1keep, [%[inptr0], x18]\n"
+    "fmla v8.4s, v22.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, #64]\n"
+    "fmla v11.4s, v22.4s, v2.4s\n"
+    "prfm pldl1keep, [x22, x16]\n"
+    "fmla v16.4s, v22.4s, v6.4s\n"
+    "prfm pldl1keep, [x20, x10]\n"
+    "fmla v7.4s, v22.4s, v19.4s\n"
+    "prfm pldl1keep, [x9, x21]\n"
+    "fmla v15.4s, v22.4s, v20.4s\n"
+    "ldr s30, [x9, x13]\n"
+    "fmla v12.4s, v21.4s, v3.4s\n"
+    "prfm pldl1keep, [x24, x18]\n"
+    "fmla v8.4s, v21.4s, v5.4s\n"
+    "prfm pldl1keep, [%[inptr0], x12]\n"
+    "fmla v11.4s, v21.4s, v4.4s\n"
+    "prfm pldl1keep, [x23, x16]\n"
+    "fmla v7.4s, v21.4s, v6.4s\n"
+    "prfm pldl1keep, [x22, x10]\n"
+    "fmla v10.4s, v21.4s, v19.4s\n"
+    "prfm pldl1keep, [x20, x21]\n"
+    "fmla v17.4s, v21.4s, v20.4s\n"
+    "ldr s22, [x24, x19]\n"
+    "fmla v11.4s, v25.4s, v5.4s\n"
+    "prfm pldl1keep, [x9, x18]\n"
+    "fmla v10.4s, v25.4s, v6.4s\n"
+    "prfm pldl1keep, [x24, x12]\n"
+    "fmla v9.4s, v25.4s, v20.4s\n"
+    "ldr s21, [%[inptr0], x17]\n"
+    "fmla v16.4s, v24.4s, v2.4s\n"
+    "prfm pldl1keep, [x23, x10]\n"
+    "fmla v14.4s, v24.4s, v19.4s\n"
+    "ldr s24, [x23]\n"
+    "fmla v8.4s, v26.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x21]\n"
+    "fmla v16.4s, v26.4s, v4.4s\n"
+    "prfm pldl1keep, [x20, x18]\n"
+    "fmla v7.4s, v26.4s, v2.4s\n"
+    "prfm pldl1keep, [x9, x12]\n"
+    "fmla v14.4s, v26.4s, v6.4s\n"
+    "prfm pldl1keep, [x23, x21]\n"
+    "fmla v15.4s, v26.4s, v19.4s\n"
+    "prfm pldl1keep, [x22, x18]\n"
+    "fmla v13.4s, v26.4s, v20.4s\n"
+    "ldr s26, [x22, %[input_col_stride1]]\n"
+    "fmla v12.4s, v30.4s, v0.4s\n"
+    "prfm pldl1keep, [x20, x12]\n"
+    "fmla v8.4s, v30.4s, v3.4s\n"
+    "prfm pldl1keep, [x23, x18]\n"
+    "fmla v11.4s, v30.4s, v1.4s\n"
+    "prfm pldl1keep, [x22, x12]\n"
+    "fmla v16.4s, v30.4s, v5.4s\n"
+    "prfm pldl1keep, [x23, x12]\n"
+    "fmla v7.4s, v30.4s, v4.4s\n"
+    "add %[wbptr], %[wbptr], #40\n"
+    "fmla v10.4s, v30.4s, v2.4s\n"
+    "prfm pldl1keep, [%[wbptr], #64]\n"
+    "fmla v15.4s, v30.4s, v6.4s\n"
+    "fmla v17.4s, v30.4s, v19.4s\n"
+    "fmla v18.4s, v30.4s, v20.4s\n"
+    "ldr s27, [x20, x13]\n"
+    "fmla v11.4s, v22.4s, v3.4s\n"
+    "fmla v7.4s, v22.4s, v5.4s\n"
+    "fmla v10.4s, v22.4s, v4.4s\n"
+    "fmla v17.4s, v22.4s, v6.4s\n"
+    "fmla v9.4s, v22.4s, v19.4s\n"
+    "fmla v14.4s, v24.4s, v2.4s\n"
+    "mov v25.16b, v23.16b\n"
+    "fmla v16.4s, v26.4s, v1.4s\n"
+    "fmla v10.4s, v21.4s, v5.4s\n"
+    "fmla v15.4s, v26.4s, v2.4s\n"
+    "fmla v25.4s, v22.4s, v20.4s\n"
+    "ldr s28, [x9, x19]\n"
+    "fmla v9.4s, v21.4s, v6.4s\n"
+    "ldr s29, [x24, x17]\n"
+    "fmla v14.4s, v26.4s, v4.4s\n"
+    "fmla v13.4s, v26.4s, v19.4s\n"
+    "mov v22.16b, v23.16b\n"
+    "fmla v8.4s, v27.4s, v0.4s\n"
+    "fmla v16.4s, v27.4s, v3.4s\n"
+    "fmla v7.4s, v27.4s, v1.4s\n"
+    "fmla v14.4s, v27.4s, v5.4s\n"
+    "fmla v15.4s, v27.4s, v4.4s\n"
+    "fmla v17.4s, v27.4s, v2.4s\n"
+    "fmla v13.4s, v27.4s, v6.4s\n"
+    "fmla v18.4s, v27.4s, v19.4s\n"
+    "fmla v22.4s, v27.4s, v20.4s\n"
+    "mov v24.16b, v23.16b\n"
+    "mov v21.16b, v23.16b\n"
+    "fmla v11.4s, v28.4s, v0.4s\n"
+    "fmla v7.4s, v28.4s, v3.4s\n"
+    "fmla v10.4s, v28.4s, v1.4s\n"
+    "fmla v15.4s, v28.4s, v5.4s\n"
+    "fmla v17.4s, v28.4s, v4.4s\n"
+    "fmla v9.4s, v28.4s, v2.4s\n"
+    "fmla v18.4s, v28.4s, v6.4s\n"
+    "fmla v25.4s, v28.4s, v19.4s\n"
+    "fmla v24.4s, v28.4s, v20.4s\n"
+    "ldr s23, [%[inptr0], x11]\n"
+    "fmla v10.4s, v29.4s, v3.4s\n"
+    "add %[inptr0], %[inptr0], #4\n"
+    "fmla v17.4s, v29.4s, v5.4s\n"
+    "fmla v9.4s, v29.4s, v4.4s\n"
+    "fmla v25.4s, v29.4s, v6.4s\n"
+    "ldr s30, [x23, %[input_col_stride1]]\n"
+    "fmla v14.4s, v30.4s, v1.4s\n"
+    "fmla v13.4s, v30.4s, v2.4s\n"
+    "fmla v9.4s, v23.4s, v5.4s\n"
+    "ldr s23, [x22, x13]\n"
+    "fmla v16.4s, v23.4s, v0.4s\n"
+    "ldr s29, [x20, x19]\n"
+    "fmla v14.4s, v23.4s, v3.4s\n"
+    "fmla v15.4s, v23.4s, v1.4s\n"
+    "fmla v13.4s, v23.4s, v4.4s\n"
+    "fmla v18.4s, v23.4s, v2.4s\n"
+    "fmla v22.4s, v23.4s, v19.4s\n"
+    "ldr s23, [x9, x17]\n"
+    "fmla v7.4s, v29.4s, v0.4s\n"
+    "fmla v15.4s, v29.4s, v3.4s\n"
+    "fmla v17.4s, v29.4s, v1.4s\n"
+    "fmla v13.4s, v29.4s, v5.4s\n"
+    "fmla v18.4s, v29.4s, v4.4s\n"
+    "fmla v25.4s, v29.4s, v2.4s\n"
+    "fmla v22.4s, v29.4s, v6.4s\n"
+    "fmla v24.4s, v29.4s, v19.4s\n"
+    "fmla v21.4s, v29.4s, v20.4s\n"
+    "ldr s26, [x24, x11]\n"
+    "fmla v10.4s, v23.4s, v0.4s\n"
+    "ldr s28, [x23, x13]\n"
+    "fmla v17.4s, v23.4s, v3.4s\n"
+    "add x24, x24, #4\n"
+    "fmla v9.4s, v23.4s, v1.4s\n"
+    "fmla v18.4s, v23.4s, v5.4s\n"
+    "fmla v25.4s, v23.4s, v4.4s\n"
+    "fmla v24.4s, v23.4s, v6.4s\n"
+    "fmla v14.4s, v28.4s, v0.4s\n"
+    "ldr s20, [x22, x19]\n"
+    "fmla v9.4s, v26.4s, v3.4s\n"
+    "fmla v13.4s, v28.4s, v1.4s\n"
+    "fmla v25.4s, v26.4s, v5.4s\n"
+    "ldr s26, [x20, x17]\n"
+    "fmla v22.4s, v28.4s, v2.4s\n"
+    "ldr s23, [x9, x11]\n"
+    "fmla v15.4s, v20.4s, v0.4s\n"
+    "add x9, x9, #4\n"
+    "fmla v13.4s, v20.4s, v3.4s\n"
+    "fmla v18.4s, v20.4s, v1.4s\n"
+    "fmla v22.4s, v20.4s, v4.4s\n"
+    "fmla v24.4s, v20.4s, v2.4s\n"
+    "fmla v21.4s, v20.4s, v19.4s\n"
+    "ldr s27, [x23, x19]\n"
+    "fmla v17.4s, v26.4s, v0.4s\n"
+    "ldr s20, [x22, x17]\n"
+    "fmla v18.4s, v26.4s, v3.4s\n"
+    "fmla v25.4s, v26.4s, v1.4s\n"
+    "fmla v22.4s, v26.4s, v5.4s\n"
+    "fmla v24.4s, v26.4s, v4.4s\n"
+    "fmla v21.4s, v26.4s, v6.4s\n"
+    "ldr s19, [x20, x11]\n"
+    "fmla v9.4s, v23.4s, v0.4s\n"
+    "ldr s28, [x23, x17]\n"
+    "fmla v25.4s, v23.4s, v3.4s\n"
+    "add x20, x20, #4\n"
+    "fmla v24.4s, v23.4s, v5.4s\n"
+    "ldr s29, [x22, x11]\n"
+    "fmla v13.4s, v27.4s, v0.4s\n"
+    "add x22, x22, #4\n"
+    "fmla v22.4s, v27.4s, v1.4s\n"
+    "fmla v21.4s, v27.4s, v2.4s\n"
+    "fmla v18.4s, v20.4s, v0.4s\n"
+    "ldr s30, [x23, x11]\n"
+    "fmla v24.4s, v20.4s, v1.4s\n"
+    "add x23, x23, #4\n"
+    "fmla v22.4s, v20.4s, v3.4s\n"
+    "fmla v21.4s, v20.4s, v4.4s\n"
+    "fmla v25.4s, v19.4s, v0.4s\n"
+    "movi v26.16b, #0\n"
+    "fmla v24.4s, v19.4s, v3.4s\n"
+    "fmov v27.4s, #6.0\n"
+    "fmla v21.4s, v19.4s, v5.4s\n"
+    "fmla v22.4s, v28.4s, v0.4s\n"
+    "fmax v12.4s, v12.4s, v26.4s\n"
+    "fmax v11.4s, v11.4s, v26.4s\n"
+    "fmla v24.4s, v29.4s, v0.4s\n"
+    "fmax v10.4s, v10.4s, v26.4s\n"
+    "fmla v21.4s, v28.4s, v1.4s\n"
+    "fmin v12.4s, v12.4s, v27.4s\n"
+    "fmin v11.4s, v11.4s, v27.4s\n"
+    "fmin v10.4s, v10.4s, v27.4s\n"
+    "str s12, [%[outptr0]]\n"
+    "fmax v9.4s, v9.4s, v26.4s\n"
+    "str s11, [%[outptr0], %[output_col_stride1]]\n"
+    "fmla v21.4s, v29.4s, v3.4s\n"
+    "str s10, [%[outptr0], x27]\n"
+    "fmin v9.4s, v9.4s, v27.4s\n"
+    "fmax v8.4s, v8.4s, v26.4s\n"
+    "fmax v7.4s, v7.4s, v26.4s\n"
+    "str s9, [%[outptr0], x28]\n"
+    "fmla v21.4s, v30.4s, v0.4s\n"
+    "fmin v8.4s, v8.4s, v27.4s\n"
+    "add %[outptr0], %[outptr0], #4\n"
+    "fmin v7.4s, v7.4s, v27.4s\n"
+    "fmax v17.4s, v17.4s, v26.4s\n"
+    "str s8, [x8]\n"
+    "fmax v25.4s, v25.4s, v26.4s\n"
+    "str s7, [x8, %[output_col_stride1]]\n"
+    "fmin v17.4s, v17.4s, v27.4s\n"
+    "fmin v25.4s, v25.4s, v27.4s\n"
+    "fmax v16.4s, v16.4s, v26.4s\n"
+    "str s17, [x8, x27]\n"
+    "fmax v15.4s, v15.4s, v26.4s\n"
+    "str s25, [x8, x28]\n"
+    "fmin v16.4s, v16.4s, v27.4s\n"
+    "fmin v15.4s, v15.4s, v27.4s\n"
+    "add x8, x8, #4\n"
+    "str s16, [x25]\n"
+    "fmax v18.4s, v18.4s, v26.4s\n"
+    "str s15, [x25, %[output_col_stride1]]\n"
+    "fmax v24.4s, v24.4s, v26.4s\n"
+    "fmin v18.4s, v18.4s, v27.4s\n"
+    "fmax v14.4s, v14.4s, v26.4s\n"
+    "fmin v24.4s, v24.4s, v27.4s\n"
+    "fmax v13.4s, v13.4s, v26.4s\n"
+    "str s18, [x25, x27]\n"
+    "fmin v14.4s, v14.4s, v27.4s\n"
+    "str s24, [x25, x28]\n"
+    "fmin v13.4s, v13.4s, v27.4s\n"
+    "str s14, [x26]\n"
+    "fmax v22.4s, v22.4s, v26.4s\n"
+    "str s13, [x26, %[output_col_stride1]]\n"
+    "fmax v21.4s, v21.4s, v26.4s\n"
+    "fmin v22.4s, v22.4s, v27.4s\n"
+    "add x25, x25, #4\n"
+    "fmin v21.4s, v21.4s, v27.4s\n"
+    "str s22, [x26, x27]\n"
+    "str s21, [x26, x28]\n"
+    "add x26, x26, #4\n"
+    "7:\n"
+    : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr)
+    : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float))
+    : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x8", "x9", "memory"
+  );
 }
 
 #endif  // __aarch64__
 
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
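+// The instantiation below uses the widened template signature; the extra value
+// type (the third 'float') is understood to name the bias type.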
+template class DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>;
 
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-  ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-  ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 3, 0>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 3>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float>;
 }  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_u8_s32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_u8_s32.cpp
deleted file mode 100644
index 8f22a64..0000000
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_u8_s32.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "impl_u8_s32.hpp"
-
-namespace depthwise
-{
-using Conv = DepthwiseConvolution<4, 4, 3, 3, 1, 1, uint8_t, int32_t>;
-using ConvImpl = DepthwiseConvolutionImpl<4, 4, 3, 3, 1, 1, uint8_t, int32_t>;
-
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
-
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-        ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-        ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 3, 0>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 3>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<4, 4, 3, 3, 1, 1, uint8_t, int32_t>;
-}  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp16_fp16.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp16_fp16.cpp
deleted file mode 100644
index 09722d0..0000000
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp16_fp16.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "impl_fp16_fp16.hpp"
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-namespace depthwise
-{
-using Conv = DepthwiseConvolution<4, 4, 3, 3, 2, 2, float16_t, float16_t>;
-using ConvImpl = DepthwiseConvolutionImpl<4, 4, 3, 3, 2, 2, float16_t, float16_t>;
-
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
-
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-        ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-        ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-        ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-        ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 3, 0>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 3>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<4, 4, 3, 3, 2, 2, float16_t, float16_t>;
-}  // namespace depthwise
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp32_fp32.cpp
index 05315ee..a04609d 100644
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp32_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_fp32_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,142 +25,5 @@
 
 namespace depthwise
 {
-using Conv = DepthwiseConvolution<4, 4, 3, 3, 2, 2, float, float>;
-using ConvImpl = DepthwiseConvolutionImpl<4, 4, 3, 3, 2, 2, float, float>;
-
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
-
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-  ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-  ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-  ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-  ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 1, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 2, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 3, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 4, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 5, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 6, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 7, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 7, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 7, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 7, 0, 3, 0>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 8, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 8, 0, 1, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 8, 0, 2, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 8, 0, 3, 0>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 3>,
-  },
-  {
-    ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 0>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 1>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 2>,
-    ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 3>,
-  },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<4, 4, 3, 3, 2, 2, float, float>;
+template class DepthwiseConvolution<4, 4, 3, 3, 2, 2, float, float, float>;
 }  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_u8_s32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_u8_s32.cpp
deleted file mode 100644
index cf51550..0000000
--- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_2x2_u8_s32.cpp
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "impl_u8_s32.hpp"
-
-namespace depthwise
-{
-using Conv = DepthwiseConvolution<4, 4, 3, 3, 2, 2, uint8_t, int32_t>;
-using ConvImpl = DepthwiseConvolutionImpl<4, 4, 3, 3, 2, 2, uint8_t, int32_t>;
-
-template <>
-const Conv::TileFn Conv::tilefn_unpadded = ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>;
-
-template <>
-const Conv::TileFn Conv::tilefn_top[n_in_pad_top_fns] = {
-        ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-        ConvImpl::template process_tile<true, 1, 0, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_left[n_in_pad_left_fns] = {
-        ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-        ConvImpl::template process_tile<true, 0, 1, 0, 0, 0, 0>,
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_bottom[n_in_pad_bottom_fns][n_out_pad_bottom_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 1, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 2, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 3, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 4, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 5, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 6, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 7, 0, 3, 0>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 1, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 2, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 8, 0, 3, 0>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_right[n_in_pad_right_fns][n_out_pad_right_fns] = {
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 0, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 1, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 2, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 3, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 4, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 5, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 6, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 7, 0, 3>,
-        },
-        {
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 0>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 1>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 2>,
-                ConvImpl::template process_tile<true, 0, 0, 0, 8, 0, 3>,
-        },
-};
-
-template <>
-const Conv::TileFn Conv::tilefn_generic = ConvImpl::template process_tile<false>;
-
-template class DepthwiseConvolution<4, 4, 3, 3, 2, 2, uint8_t, int32_t>;
-}  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_pack_parameters.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_pack_parameters.cpp
new file mode 100644
index 0000000..692086c
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_pack_parameters.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp"
+
+// TODO: Move this to common utilities somewhere.
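+// DType maps an element size in bytes to an unsigned integer type of that
+// width, so the packing routine below can copy weights and biases without
+// knowing their actual types.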
+template <size_t Size> struct DType { };
+template <> struct DType<1> { using scalar_type = uint8_t; };
+template <> struct DType<2> { using scalar_type = uint16_t; };
+template <> struct DType<4> { using scalar_type = uint32_t; };
+
+namespace depthwise
+{
+
+template <unsigned int KernelRows, unsigned int KernelColumns, size_t WeightSize, size_t BiasSize>
+void PackParameters<KernelRows, KernelColumns, WeightSize, BiasSize>::execute(
+  unsigned int n_channels,
+  void *buffer,
+  const void *weights,
+  const unsigned int weight_row_stride,
+  const unsigned int weight_col_stride,
+  const void *biases
+)
+{
+  using TWeight = typename DType<WeightSize>::scalar_type;
+  using TBias = typename DType<BiasSize>::scalar_type;
+
+  auto buffer_ptr = static_cast<uint8_t *>(buffer);
+  auto weights_ptr = static_cast<const TWeight *>(weights);
+  auto biases_ptr = static_cast<const TBias *>(biases);
+
+  const unsigned int veclen = 16 / WeightSize;
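+  // veclen is the number of elements that fit in a 128-bit NEON register.
+  // Channels are packed in groups of veclen as [veclen biases][row-major
+  // kernel taps, veclen channels per tap]; e.g. for fp32 3x3 that is
+  // [4 biases][w00 x4]...[w22 x4] = 160 bytes per group.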
+  for (; n_channels >= veclen; n_channels -= veclen)
+  {
+    // Copy biases
+    for (unsigned int i = 0; i < veclen; i++)
+    {
+      auto ptr = reinterpret_cast<TBias *>(buffer_ptr);
+      *ptr = (biases_ptr == nullptr) ? 0x0 : *(biases_ptr++);
+      buffer_ptr += BiasSize;
+    }
+
+    // Copy weights
+    for (unsigned int i = 0; i < KernelRows; i++)
+    {
+      for (unsigned int j = 0; j < KernelColumns; j++)
+      {
+        for (unsigned int c = 0; c < veclen; c++)
+        {
+          *(reinterpret_cast<TWeight *>(buffer_ptr)) = weights_ptr[i*weight_row_stride + j*weight_col_stride + c];
+          buffer_ptr += WeightSize;
+        }
+      }
+    }
+    weights_ptr += veclen;
+  }
+  for (; n_channels; n_channels--)
+  {
+    // Copy bias
+    auto ptr = reinterpret_cast<TBias *>(buffer_ptr);
+    *ptr = (biases_ptr == nullptr) ? 0x0 : *(biases_ptr++);
+    buffer_ptr += BiasSize;
+
+    // Copy weights
+    for (unsigned int i = 0; i < KernelRows; i++)
+    {
+      for (unsigned int j = 0; j < KernelColumns; j++)
+      {
+        *(reinterpret_cast<TWeight *>(buffer_ptr)) = weights_ptr[i*weight_row_stride + j*weight_col_stride];
+        buffer_ptr += WeightSize;
+      }
+    }
+    weights_ptr++;
+  }
+}
+
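+// Instantiations cover the 2-byte (float16_t) and 4-byte (float / int32_t)
+// parameter sizes used by the kernels.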
+template struct PackParameters<3, 3, 2ul, 2ul>;
+template struct PackParameters<3, 3, 4ul, 4ul>;
+}  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_qa8_qa8.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_qa8_qa8.cpp
new file mode 100644
index 0000000..1989f87
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_qa8_qa8.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "impl_qa8_qa8.hpp"
+
+namespace depthwise
+{
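+// Quantised asymmetric 8-bit kernels: 2x2 output tiles for both unit and 2x2 strides.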
+template class QAsymm8DepthwiseConvolution<2, 2, 3, 3, 1, 1>;
+template class QAsymm8DepthwiseConvolution<2, 2, 3, 3, 2, 2>;
+}  // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp
index dacfb24..1ae8128 100644
--- a/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,107 +35,60 @@
 
 #pragma once
 
+using namespace neon_convolution_kernels;
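+// ActivationFunction, used throughout the kernels below, comes from this namespace.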
+
 namespace depthwise
 {
-// Partial specialisation for FP16 to FP16
-template <int OutputTileRows, int OutputTileCols,
-          int KernelRows, int KernelCols,
-          int StrideRows, int StrideCols>
-struct DepthwiseConvolutionImpl<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols, float16_t, float16_t>
-{
-  typedef DepthwiseConvolution<
-    OutputTileRows, OutputTileCols,
-    KernelRows, KernelCols,
-    StrideRows, StrideCols,
-    float16_t, float16_t
-  > DWC;
 
-  template <
-    bool Specialize=false,  // Specialize (or not) the method
-    int InPadTop=0,         // If specialized, top padding
-    int InPadLeft=0,        // If specialized, left padding
-    int InPadBottom=0,      // If specialized, bottom padding
-    int InPadRight=0,       // If specialized, right padding
-    int OutPadBottom=0,     // If specialized, bottom output padding
-    int OutPadRight=0       // If specialized, bottom right padding
-  >
-  static void process_tile(
-    const int n_channels,
-    const float16_t* const weights,
-    const int weight_row_stride,
-    const int weight_col_stride,
-    const float16_t* const inptr,
-    const int in_row_stride,
-    const int in_col_stride,
-    float16_t* const outptr,
-    const int out_row_stride,
-    const int out_col_stride,
-    const int in_pad_top=0,
-    const int in_pad_left=0,
-    const int in_pad_bottom=0,
-    const int in_pad_right=0,
-    const int out_pad_bottom=0,
-    const int out_pad_right=0,
-    const int input_offset=0,
-    const int weights_offset=0
-  );
-};
-
-
-template <int OTR, int OTC, int KR, int KC, int SR, int SC>
 template <
-  bool Specialize,
-  int InPadTop, int InPadLeft, int InPadBottom, int InPadRight,
-  int OutPadBottom, int OutPadRight
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
 >
-void DepthwiseConvolutionImpl<OTR, OTC, KR, KC, SR, SC, float16_t, float16_t>::process_tile(
-  const int n_channels,
-  const float16_t *__restrict__ const weights,
-  const int weight_row_stride,
-  const int weight_col_stride,
-  const float16_t *__restrict__ const inptr,
-  const int in_row_stride,
-  const int in_col_stride,
-  float16_t *__restrict__ const outptr,
-  const int out_row_stride,
-  const int out_col_stride,
-  const int _in_pad_top,
-  const int _in_pad_left,
-  const int _in_pad_bottom,
-  const int _in_pad_right,
-  const int _out_pad_bottom,
-  const int _out_pad_right,
-  const int _input_offset,
-  const int _weights_offset
+DepthwiseConvolution<
+  OutputTileRows, OutputTileCols,
+  KernelRows, KernelCols, StrideRows, StrideCols,
+  float16_t, float16_t, float16_t
+>::DepthwiseConvolution(
+  int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+  ActivationFunction activation,
+  unsigned int padding_top,
+  unsigned int padding_left,
+  unsigned int padding_bottom,
+  unsigned int padding_right
+) : Base(
+      n_batches, n_input_rows, n_input_cols, n_channels, activation,
+      padding_top, padding_left, padding_bottom, padding_right
+    )
+{
+}
+
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+template <ActivationFunction Activation>
+void DepthwiseConvolution<
+  OutputTileRows, OutputTileCols,
+  KernelRows, KernelCols, StrideRows, StrideCols,
+  float16_t, float16_t, float16_t
+>::execute_tile(
+  int n_channels,
+  const void *weights_biases_ptr,
+  const float16_t *input,
+  const unsigned int in_row_stride,
+  const unsigned int in_col_stride,
+  float16_t *output,
+  const unsigned int out_row_stride,
+  const unsigned int out_col_stride
 )
 {
-  constexpr auto inner_tile_rows = DWC::inner_tile_rows;
-  constexpr auto inner_tile_cols = DWC::inner_tile_cols;
-  constexpr auto kernel_rows = DWC::kernel_rows;
-  constexpr auto kernel_cols = DWC::kernel_cols;
-  constexpr auto output_tile_rows = DWC::output_tile_rows;
-  constexpr auto output_tile_cols = DWC::output_tile_cols;
-  constexpr auto stride_rows = DWC::stride_rows;
-  constexpr auto stride_cols = DWC::stride_cols;
-
-  // Extract parameters
-  const int in_pad_top = Specialize ? InPadTop : _in_pad_top;
-  const int in_pad_left = Specialize ? InPadLeft : _in_pad_left;
-  const int in_pad_bottom = Specialize ? InPadBottom : _in_pad_bottom;
-  const int in_pad_right = Specialize ? InPadRight : _in_pad_right;
-  const int out_pad_bottom = Specialize ? OutPadBottom : _out_pad_bottom;
-  const int out_pad_right = Specialize ? OutPadRight : _out_pad_right;
-
-  // Compute valid ranges of the tile
-  const int in_cells_i = inner_tile_rows - in_pad_bottom;
-  const int in_cells_j = inner_tile_cols - in_pad_right;
-  const int out_cells_i = output_tile_rows - out_pad_bottom;
-  const int out_cells_j = output_tile_cols - out_pad_right;
-
   // Instantiate pointers
-  const float16_t* __restrict__ inptr_base = inptr;
-  const float16_t* __restrict__ wptr_base = weights;
-    float16_t* __restrict__ outptr_base = outptr;
+  const float16_t* __restrict__ inptr_base = input;
+  float16_t* __restrict__ outptr_base = output;
+  const float16_t* __restrict__ params = static_cast<const float16_t*>(weights_biases_ptr);
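+  // 'params' walks the packed [bias | weights] parameter stream (the layout
+  // produced by PackParameters).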
 
   // Perform the depthwise convolution
   int channels_remaining = n_channels;
@@ -143,74 +96,72 @@
   for (; channels_remaining >= 8; channels_remaining -= 8)
   {
     // Load input tile
-    float16x8_t u[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
+    float16x8_t u[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (int i = 0; i < Base::inner_tile_rows; i++)
     {
-      const float16_t* const inptr_row = inptr_base + (i - in_pad_top)*in_row_stride;
-      for (int j = 0; j < inner_tile_cols; j++)
+      const float16_t* const inptr_row = inptr_base + i*in_row_stride;
+      for (int j = 0; j < Base::inner_tile_cols; j++)
       {
-        if (i < in_pad_top || in_cells_i <= i ||
-            j < in_pad_left || in_cells_j <= j)
-        {
-          u[i][j] = vdupq_n_f16(0.0f);
-        }
-        else
-        {
-          u[i][j] = vld1q_f16(inptr_row + (j - in_pad_left)*in_col_stride);
-        }
+        u[i][j] = vld1q_f16(inptr_row + j*in_col_stride);
       }
     }
     inptr_base += 8;
 
     // Load weights tile
-    float16x8_t w[kernel_rows][kernel_cols];
-    for (int i = 0; i < kernel_rows; i++)
+    float16x8_t vbias = vld1q_f16(params);
+    params += 8;
+
+    float16x8_t w[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
     {
-      const float16_t* const wptr_row = wptr_base + i*weight_row_stride;
-      for (int j = 0; j < kernel_cols; j++)
+      for (unsigned int j = 0; j < KernelCols; j++)
       {
-        w[i][j] = vld1q_f16(wptr_row + j*weight_col_stride);
+        w[i][j] = vld1q_f16(params);
+        params += 8;
       }
     }
-    wptr_base += 8;
 
     // Perform the convolution
-    float16x8_t v[output_tile_rows][output_tile_cols];
-    for (int out_i = 0; out_i < out_cells_i; out_i++)
+    float16x8_t v[OutputTileRows][OutputTileCols];
+    for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++)
     {
-      for (int out_j = 0; out_j < out_cells_j; out_j++)
+      for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++)
       {
+        v[out_i][out_j] = vbias;
+
         // Base co-ordinate
-        const int base_i = out_i * stride_rows;
-        const int base_j = out_j * stride_cols;
+        const int base_i = out_i * StrideRows;
+        const int base_j = out_j * StrideCols;
 
         // Fill the accumulator
-        for (int in_i = 0; in_i < kernel_rows; in_i++)
+        for (unsigned int in_i = 0; in_i < KernelRows; in_i++)
         {
-          const int i = base_i + in_i;
-          for (int in_j = 0; in_j < kernel_cols; in_j++)
+          const unsigned int i = base_i + in_i;
+          for (unsigned int in_j = 0; in_j < KernelCols; in_j++)
           {
-            const int j = base_j + in_j;
-            if (in_i == 0 && in_j == 0)
-            {
-              // v[out_i][out_j] = w[in_i][in_j] * u[i][j];
-              v[out_i][out_j] = vmulq_f16(w[in_i][in_j], u[i][j]);
-            }
-            else
-            {
-              // v[out_i][out_j] += w[in_i][in_j] * u[i][j];
-              v[out_i][out_j] = vaddq_f16(v[out_i][out_j], vmulq_f16(w[in_i][in_j], u[i][j]));
-            }
+            const unsigned int j = base_j + in_j;
+            v[out_i][out_j] = vaddq_f16(v[out_i][out_j], vmulq_f16(w[in_i][in_j], u[i][j]));
           }
         }
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = vmaxq_f16(v[out_i][out_j], vdupq_n_f16(0.0f));
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = vminq_f16(v[out_i][out_j], vdupq_n_f16(6.0f));
+        }
       }
     }
 
     // Store the output tile
-    for (int i = 0; i < out_cells_i; i++)
+    for (unsigned int i = 0; i < OutputTileRows; i++)
     {
       float16_t* const outptr_row = outptr_base + i*out_row_stride;
-      for (int j = 0; j < out_cells_j; j++)
+      for (unsigned int j = 0; j < OutputTileCols; j++)
       {
         vst1q_f16(outptr_row + j*out_col_stride, v[i][j]);
       }
@@ -221,68 +172,70 @@
   for (; channels_remaining; channels_remaining--)
   {
     // Load input tile
-    float16_t u[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
+    float16_t u[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (int i = 0; i < Base::inner_tile_rows; i++)
     {
-      const float16_t* const inptr_row = inptr_base + (i - in_pad_top)*in_row_stride;
-      for (int j = 0; j < inner_tile_cols; j++)
+      const float16_t* const inptr_row = inptr_base + i*in_row_stride;
+      for (int j = 0; j < Base::inner_tile_cols; j++)
       {
-        if (i < in_pad_top || in_cells_i <= i ||
-            j < in_pad_left || in_cells_j <= j)
-        {
-          u[i][j] = static_cast<float16_t>(0);
-        }
-        else
-        {
-          u[i][j] = *(inptr_row + (j - in_pad_left)*in_col_stride);
-        }
+        u[i][j] = *(inptr_row + j*in_col_stride);
       }
     }
     inptr_base++;
 
-    // Load weights tile
+    // Load bias and weights
-    float16_t w[kernel_rows][kernel_cols];
-    for (int i = 0; i < kernel_rows; i++)
+    float16_t bias = *(params++);
+    float16_t w[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
     {
-      const float16_t* const wptr_row = wptr_base + i*weight_row_stride;
-      for (int j = 0; j < kernel_cols; j++)
+      for (unsigned int j = 0; j < KernelCols; j++)
       {
-        w[i][j] = *(wptr_row + j*weight_col_stride);
+        w[i][j] = *(params++);
       }
     }
-    wptr_base++;
 
     // Perform the convolution
-    float16_t v[output_tile_rows][output_tile_cols];
-    for (int out_i = 0; out_i < out_cells_i; out_i++)
+    float16_t v[OutputTileRows][OutputTileCols];
+    for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++)
     {
-      for (int out_j = 0; out_j < out_cells_j; out_j++)
+      for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++)
       {
-        // Clear the accumulator
-        v[out_i][out_j] = static_cast<float16_t>(0);
+        // Initialise the accumulator with the bias
+        v[out_i][out_j] = bias;
 
         // Base co-ordinate
-        const int base_i = out_i * stride_rows;
-        const int base_j = out_j * stride_cols;
+        const int base_i = out_i * StrideRows;
+        const int base_j = out_j * StrideCols;
 
         // Fill the accumulator
-        for (int in_i = 0; in_i < kernel_rows; in_i++)
+        for (unsigned int in_i = 0; in_i < KernelRows; in_i++)
         {
-          const int i = base_i + in_i;
-          for (int in_j = 0; in_j < kernel_cols; in_j++)
+          const unsigned int i = base_i + in_i;
+          for (unsigned int in_j = 0; in_j < KernelCols; in_j++)
           {
             const int j = base_j + in_j;
             v[out_i][out_j] += w[in_i][in_j] * u[i][j];
           }
         }
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = std::max<float16_t>(0.0f, v[out_i][out_j]);
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = std::min<float16_t>(6.0f, v[out_i][out_j]);
+        }
       }
     }
 
     // Store the output tile
-    for (int i = 0; i < out_cells_i; i++)
+    for (unsigned int i = 0; i < OutputTileRows; i++)
     {
       float16_t* const outptr_row = outptr_base + i*out_row_stride;
-      for (int j = 0; j < out_cells_j; j++)
+      for (unsigned int j = 0; j < OutputTileCols; j++)
       {
         *(outptr_row + j*out_col_stride) = v[i][j];
       }
@@ -290,5 +243,6 @@
     outptr_base++;
   }
 }
+
 }  // namespace depthwise
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
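Note: the rewritten execute_tile above no longer receives separate weight and bias pointers with row/column strides; it walks a single packed parameter blob. A minimal sketch of a packer producing the layout the float16 path reads — bias lanes for a group of channels first, then each kernel weight interleaved across the same group — is shown below. All names are illustrative and not part of this patch; the actual packing lives elsewhere in the library.

    // Illustrative sketch only: packs one group of `veclen` channels in the
    // order execute_tile consumes them (bias lanes first, then the kernel
    // weights row-major, each interleaved across the channel group).
    template <typename T>
    T *pack_group(T *out, const T *bias, const T *weights /* assumed [kr][kc][n_channels] */,
                  unsigned kr, unsigned kc, unsigned n_channels,
                  unsigned ch0, unsigned veclen)
    {
      for (unsigned n = 0; n < veclen; n++)
      {
        *(out++) = (bias != nullptr) ? bias[ch0 + n] : T(0);
      }
      for (unsigned i = 0; i < kr; i++)
      {
        for (unsigned j = 0; j < kc; j++)
        {
          for (unsigned n = 0; n < veclen; n++)
          {
            *(out++) = weights[(i*kc + j)*n_channels + ch0 + n];
          }
        }
      }
      return out;
    }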
diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp
index 840086f..10d110f 100644
--- a/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,107 +35,60 @@
 
 #pragma once
 
+using namespace neon_convolution_kernels;
+
 namespace depthwise
 {
-// Partial specialisation for FP32 to FP32
-template <int OutputTileRows, int OutputTileCols,
-          int KernelRows, int KernelCols,
-          int StrideRows, int StrideCols>
-struct DepthwiseConvolutionImpl<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols, float, float>
-{
-  typedef DepthwiseConvolution<
-    OutputTileRows, OutputTileCols,
-    KernelRows, KernelCols,
-    StrideRows, StrideCols,
-    float, float
-  > DWC;
 
-  template <
-    bool Specialize=false,  // Specialize (or not) the method
-    int InPadTop=0,         // If specialized, top padding
-    int InPadLeft=0,        // If specialized, left padding
-    int InPadBottom=0,      // If specialized, bottom padding
-    int InPadRight=0,       // If specialized, right padding
-    int OutPadBottom=0,     // If specialized, bottom output padding
-    int OutPadRight=0       // If specialized, bottom right padding
-  >
-  static void process_tile(
-    const int n_channels,
-    const float* const weights,
-    const int weight_row_stride,
-    const int weight_col_stride,
-    const float* const inptr,
-    const int in_row_stride,
-    const int in_col_stride,
-    float* const outptr,
-    const int out_row_stride,
-    const int out_col_stride,
-    const int in_pad_top=0,
-    const int in_pad_left=0,
-    const int in_pad_bottom=0,
-    const int in_pad_right=0,
-    const int out_pad_bottom=0,
-    const int out_pad_right=0,
-    const int input_offset=0,
-    const int weights_offset=0
-  );
-};
-
-
-template <int OTR, int OTC, int KR, int KC, int SR, int SC>
 template <
-  bool Specialize,
-  int InPadTop, int InPadLeft, int InPadBottom, int InPadRight,
-  int OutPadBottom, int OutPadRight
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
 >
-void DepthwiseConvolutionImpl<OTR, OTC, KR, KC, SR, SC, float, float>::process_tile(
-  const int n_channels,
-  const float *__restrict__ const weights,
-  const int weight_row_stride,
-  const int weight_col_stride,
-  const float *__restrict__ const inptr,
-  const int in_row_stride,
-  const int in_col_stride,
-  float *__restrict__ const outptr,
-  const int out_row_stride,
-  const int out_col_stride,
-  const int _in_pad_top,
-  const int _in_pad_left,
-  const int _in_pad_bottom,
-  const int _in_pad_right,
-  const int _out_pad_bottom,
-  const int _out_pad_right,
-  const int _input_offset,
-  const int _weights_offset
+DepthwiseConvolution<
+  OutputTileRows, OutputTileCols,
+  KernelRows, KernelCols, StrideRows, StrideCols,
+  float, float, float
+>::DepthwiseConvolution(
+  int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+  ActivationFunction activation,
+  unsigned int padding_top,
+  unsigned int padding_left,
+  unsigned int padding_bottom,
+  unsigned int padding_right
+) : Base(
+      n_batches, n_input_rows, n_input_cols, n_channels, activation,
+      padding_top, padding_left, padding_bottom, padding_right
+    )
+{
+}
+
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+template <ActivationFunction Activation>
+void DepthwiseConvolution<
+  OutputTileRows, OutputTileCols,
+  KernelRows, KernelCols, StrideRows, StrideCols,
+  float, float, float
+>::execute_tile(
+  int n_channels,
+  const void *weights_biases_ptr,
+  const float *input,
+  const unsigned int in_row_stride,
+  const unsigned int in_col_stride,
+  float *output,
+  const unsigned int out_row_stride,
+  const unsigned int out_col_stride
 )
 {
-  constexpr auto inner_tile_rows = DWC::inner_tile_rows;
-  constexpr auto inner_tile_cols = DWC::inner_tile_cols;
-  constexpr auto kernel_rows = DWC::kernel_rows;
-  constexpr auto kernel_cols = DWC::kernel_cols;
-  constexpr auto output_tile_rows = DWC::output_tile_rows;
-  constexpr auto output_tile_cols = DWC::output_tile_cols;
-  constexpr auto stride_rows = DWC::stride_rows;
-  constexpr auto stride_cols = DWC::stride_cols;
-
-  // Extract parameters
-  const int in_pad_top = Specialize ? InPadTop : _in_pad_top;
-  const int in_pad_left = Specialize ? InPadLeft : _in_pad_left;
-  const int in_pad_bottom = Specialize ? InPadBottom : _in_pad_bottom;
-  const int in_pad_right = Specialize ? InPadRight : _in_pad_right;
-  const int out_pad_bottom = Specialize ? OutPadBottom : _out_pad_bottom;
-  const int out_pad_right = Specialize ? OutPadRight : _out_pad_right;
-
-  // Compute valid ranges of the tile
-  const int in_cells_i = inner_tile_rows - in_pad_bottom;
-  const int in_cells_j = inner_tile_cols - in_pad_right;
-  const int out_cells_i = output_tile_rows - out_pad_bottom;
-  const int out_cells_j = output_tile_cols - out_pad_right;
-
   // Instantiate pointers
-  const float* __restrict__ inptr_base = inptr;
-  const float* __restrict__ wptr_base = weights;
-  float* __restrict__ outptr_base = outptr;
+  const float* __restrict__ inptr_base = input;
+  float* __restrict__ outptr_base = output;
+  const float* __restrict__ params = static_cast<const float*>(weights_biases_ptr);
 
   // Perform the depthwise convolution
   int channels_remaining = n_channels;
@@ -143,74 +96,74 @@
   for (; channels_remaining >= 4; channels_remaining -= 4)
   {
     // Load input tile
-    float32x4_t u[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
+    float32x4_t u[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (int i = 0; i < Base::inner_tile_rows; i++)
     {
-      const float* const inptr_row = inptr_base + (i - in_pad_top)*in_row_stride;
-      for (int j = 0; j < inner_tile_cols; j++)
+      const float* const inptr_row = inptr_base + i*in_row_stride;
+      for (int j = 0; j < Base::inner_tile_cols; j++)
       {
-        if (i < in_pad_top || in_cells_i <= i ||
-            j < in_pad_left || in_cells_j <= j)
-        {
-          u[i][j] = vdupq_n_f32(0.0f);
-        }
-        else
-        {
-          u[i][j] = vld1q_f32(inptr_row + (j - in_pad_left)*in_col_stride);
-        }
+        u[i][j] = vld1q_f32(inptr_row + j*in_col_stride);
       }
     }
     inptr_base += 4;
 
-    // Load weights tile
+    // Load bias and weights
-    float32x4_t w[kernel_rows][kernel_cols];
-    for (int i = 0; i < kernel_rows; i++)
+    float32x4_t vbias = vld1q_f32(params);
+    params += 4;
+
+    float32x4_t w[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
     {
-      const float* const wptr_row = wptr_base + i*weight_row_stride;
-      for (int j = 0; j < kernel_cols; j++)
+      for (unsigned int j = 0; j < KernelCols; j++)
       {
-        w[i][j] = vld1q_f32(wptr_row + j*weight_col_stride);
+        w[i][j] = vld1q_f32(params);
+        params += 4;
       }
     }
-    wptr_base += 4;
 
     // Perform the convolution
-    float32x4_t v[output_tile_rows][output_tile_cols];
-    for (int out_i = 0; out_i < out_cells_i; out_i++)
+    float32x4_t v[OutputTileRows][OutputTileCols];
+    for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++)
     {
-      for (int out_j = 0; out_j < out_cells_j; out_j++)
+      for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++)
       {
+        v[out_i][out_j] = vbias;
+
         // Base co-ordinate
-        const int base_i = out_i * stride_rows;
-        const int base_j = out_j * stride_cols;
+        const int base_i = out_i * StrideRows;
+        const int base_j = out_j * StrideCols;
 
         // Fill the accumulator
-        for (int in_i = 0; in_i < kernel_rows; in_i++)
+        for (unsigned int in_i = 0; in_i < KernelRows; in_i++)
         {
-          const int i = base_i + in_i;
-          for (int in_j = 0; in_j < kernel_cols; in_j++)
+          const unsigned int i = base_i + in_i;
+          for (unsigned int in_j = 0; in_j < KernelCols; in_j++)
           {
-            const int j = base_j + in_j;
-            if (in_i == 0 && in_j == 0)
-            {
-              // v[out_i][out_j] = w[in_i][in_j] * u[i][j];
-              v[out_i][out_j] = vmulq_f32(w[in_i][in_j], u[i][j]);
-            }
-            else
-            {
-              // v[out_i][out_j] += w[in_i][in_j] * u[i][j];
-              v[out_i][out_j] = vmlaq_f32(v[out_i][out_j], w[in_i][in_j], u[i][j]);
-            }
+            const unsigned int j = base_j + in_j;
+
+            // v[out_i][out_j] += w[in_i][in_j] * u[i][j];
+            v[out_i][out_j] = vmlaq_f32(v[out_i][out_j], w[in_i][in_j], u[i][j]);
           }
         }
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = vmaxq_f32(v[out_i][out_j], vdupq_n_f32(0.0f));
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = vminq_f32(v[out_i][out_j], vdupq_n_f32(6.0f));
+        }
       }
     }
 
     // Store the output tile
-    for (int i = 0; i < out_cells_i; i++)
+    for (unsigned int i = 0; i < OutputTileRows; i++)
     {
       float* const outptr_row = outptr_base + i*out_row_stride;
-      for (int j = 0; j < out_cells_j; j++)
+      for (unsigned int j = 0; j < OutputTileCols; j++)
       {
         vst1q_f32(outptr_row + j*out_col_stride, v[i][j]);
       }
@@ -221,68 +174,70 @@
   for (; channels_remaining; channels_remaining--)
   {
     // Load input tile
-    float u[inner_tile_rows][inner_tile_cols];
-    for (int i = 0; i < inner_tile_rows; i++)
+    float u[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (int i = 0; i < Base::inner_tile_rows; i++)
     {
-      const float* const inptr_row = inptr_base + (i - in_pad_top)*in_row_stride;
-      for (int j = 0; j < inner_tile_cols; j++)
+      const float* const inptr_row = inptr_base + i*in_row_stride;
+      for (int j = 0; j < Base::inner_tile_cols; j++)
       {
-        if (i < in_pad_top || in_cells_i <= i ||
-            j < in_pad_left || in_cells_j <= j)
-        {
-          u[i][j] = static_cast<float>(0);
-        }
-        else
-        {
-          u[i][j] = *(inptr_row + (j - in_pad_left)*in_col_stride);
-        }
+        u[i][j] = *(inptr_row + j*in_col_stride);
       }
     }
     inptr_base++;
 
-    // Load weights tile
+    // Load bias and weights
-    float w[kernel_rows][kernel_cols];
-    for (int i = 0; i < kernel_rows; i++)
+    float bias = *(params++);
+    float w[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
     {
-      const float* const wptr_row = wptr_base + i*weight_row_stride;
-      for (int j = 0; j < kernel_cols; j++)
+      for (unsigned int j = 0; j < KernelCols; j++)
       {
-        w[i][j] = *(wptr_row + j*weight_col_stride);
+        w[i][j] = *(params++);
       }
     }
-    wptr_base++;
 
     // Perform the convolution
-    float v[output_tile_rows][output_tile_cols];
-    for (int out_i = 0; out_i < out_cells_i; out_i++)
+    float v[OutputTileRows][OutputTileCols];
+    for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++)
     {
-      for (int out_j = 0; out_j < out_cells_j; out_j++)
+      for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++)
       {
-        // Clear the accumulator
-        v[out_i][out_j] = static_cast<float>(0);
+        // Initialise the accumulator with the bias
+        v[out_i][out_j] = bias;
 
         // Base co-ordinate
-        const int base_i = out_i * stride_rows;
-        const int base_j = out_j * stride_cols;
+        const int base_i = out_i * StrideRows;
+        const int base_j = out_j * StrideCols;
 
         // Fill the accumulator
-        for (int in_i = 0; in_i < kernel_rows; in_i++)
+        for (unsigned int in_i = 0; in_i < KernelRows; in_i++)
         {
-          const int i = base_i + in_i;
-          for (int in_j = 0; in_j < kernel_cols; in_j++)
+          const unsigned int i = base_i + in_i;
+          for (unsigned int in_j = 0; in_j < KernelCols; in_j++)
           {
             const int j = base_j + in_j;
             v[out_i][out_j] += w[in_i][in_j] * u[i][j];
           }
         }
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = std::max(0.0f, v[out_i][out_j]);
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          v[out_i][out_j] = std::min(6.0f, v[out_i][out_j]);
+        }
       }
     }
 
     // Store the output tile
-    for (int i = 0; i < out_cells_i; i++)
+    for (unsigned int i = 0; i < OutputTileRows; i++)
     {
       float* const outptr_row = outptr_base + i*out_row_stride;
-      for (int j = 0; j < out_cells_j; j++)
+      for (unsigned int j = 0; j < OutputTileCols; j++)
       {
         *(outptr_row + j*out_col_stride) = v[i][j];
       }
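Aside: execute_tile is now templated on the activation (template <ActivationFunction Activation>), so the `if (Activation == ...)` tests above compare compile-time constants and each instantiation keeps only the clamps it needs. A minimal standalone illustration of the pattern, with hypothetical names:

    #include <algorithm>

    enum class Act { None, ReLU, ReLU6 };

    template <Act A>
    float apply(float v)
    {
      // Both tests fold at compile time: apply<Act::None> is a plain return,
      // apply<Act::ReLU6> keeps both the max and the min.
      if (A == Act::ReLU || A == Act::ReLU6) { v = std::max(v, 0.0f); }
      if (A == Act::ReLU6)                   { v = std::min(v, 6.0f); }
      return v;
    }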
diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qa8.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qa8.hpp
new file mode 100644
index 0000000..72f7c6b
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qa8.hpp
@@ -0,0 +1,634 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ *
+ *          NOTE: Header to be included by implementation files only.
+ *
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ */
+
+#include <limits>
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp"
+
+#pragma once
+
+// Comment out the following line to use floating-point quantisation; leave it
+// uncommented to use fixed-point requantisation.
+#define FIXED_POINT_REQUANTISATION 1
+
+using namespace neon_convolution_kernels;
+using namespace qasymm8;
+
+template <typename T>
+struct clamp_to_limits
+{
+  template <typename U>
+  static inline U clamp(const U& v)
+  {
+    const std::numeric_limits<T> limits;
+    const U min = static_cast<U>(limits.min());
+    const U max = static_cast<U>(limits.max());
+    return std::min(std::max(v, min), max);
+  }
+
+  template <typename U>
+  static inline T clamp_and_cast(const U& v)
+  {
+    return static_cast<T>(clamp(v));
+  }
+};
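+
+// For example (illustrative): clamp_to_limits<uint8_t>::clamp_and_cast(int32_t(300))
+// yields 255 and clamp_and_cast(int32_t(-7)) yields 0, since the value is first
+// clamped to [0, 255] in the wider type and only then narrowed.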
+
+template <typename T>
+inline T saturating_doubling_high_mul(const T&, const int32_t&);
+
+template <>
+inline int32x4_t saturating_doubling_high_mul(const int32x4_t& a, const int32_t& b)
+{
+  return vqrdmulhq_n_s32(a, b);
+}
+
+template <>
+inline int32_t saturating_doubling_high_mul(const int32_t& a, const int32_t& b)
+{
+  return vget_lane_s32(vqrdmulh_n_s32(vdup_n_s32(a), b), 0);
+}
+
+template <typename T>
+inline T rounding_divide_by_exp2(const T& x, const int exponent);
+
+template <>
+inline int32x4_t rounding_divide_by_exp2(const int32x4_t& x, const int exponent)
+{
+  const int32x4_t shift = vdupq_n_s32(-exponent);
+  const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift), 31);
+  const int32x4_t fixed = vqaddq_s32(x, fixup);
+  return vrshlq_s32(fixed, shift);
+}
+
+template <>
+inline int32x2_t rounding_divide_by_exp2(const int32x2_t& x, const int exponent)
+{
+  const int32x2_t shift = vdup_n_s32(-exponent);
+  const int32x2_t fixup = vshr_n_s32(vand_s32(x, shift), 31);
+  const int32x2_t fixed = vqadd_s32(x, fixup);
+  return vrshl_s32(fixed, shift);
+}
+
+template <>
+inline int32_t rounding_divide_by_exp2(const int32_t& x, const int exponent)
+{
+  const int32x2_t xs = vdup_n_s32(x);
+  return vget_lane_s32(rounding_divide_by_exp2(xs, exponent), 0);
+}
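+
+// Worked example (illustrative): a real requantisation scale of ~0.000813 can
+// be encoded as multiplier = 1786706395 (~0.832 in Q0.31) with shift = 10,
+// since 0.832 * 2^-10 ~= 0.000813. saturating_doubling_high_mul(acc, multiplier)
+// then computes ~acc * 0.832 and rounding_divide_by_exp2(..., 10) divides by
+// 1024 with round-to-nearest, together applying the requested rescale.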
+
+namespace depthwise
+{
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+QAsymm8DepthwiseConvolution<
+  OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols
+>::QAsymm8DepthwiseConvolution(
+  int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+  const ActivationFunction activation,
+  const QAsymm8Params& weight_quantisation,
+  const QAsymm8Params& input_quantisation,
+  const QAsymm8Params& output_quantisation,
+  unsigned int padding_top,
+  unsigned int padding_left,
+  unsigned int padding_bottom,
+  unsigned int padding_right
+) : QAsymm8DepthwiseConvolution(
+    n_batches, n_input_rows, n_input_cols, n_channels,
+    activation, weight_quantisation, input_quantisation, output_quantisation,
+    QAsymm8RescaleParams::make_rescale_params(weight_quantisation, input_quantisation, output_quantisation),
+    padding_top, padding_left, padding_bottom, padding_right
+  )
+{
+}
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+QAsymm8DepthwiseConvolution<
+  OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols
+>::QAsymm8DepthwiseConvolution(
+  int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+  const ActivationFunction activation,
+  const QAsymm8Params& weight_quantisation,
+  const QAsymm8Params& input_quantisation,
+  const QAsymm8Params& output_quantisation,
+  const QAsymm8RescaleParams& rescale_params,
+  unsigned int padding_top,
+  unsigned int padding_left,
+  unsigned int padding_bottom,
+  unsigned int padding_right
+) : Base(
+    n_batches, n_input_rows, n_input_cols, n_channels,
+    get_activation_fn(activation, output_quantisation),
+    padding_top, padding_left, padding_bottom, padding_right
+  ),
+  _weights_quant(weight_quantisation),
+  _inputs_quant(input_quantisation),
+  _output_quant(output_quantisation),
+  rescale_parameters(rescale_params)
+{
+}
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+ActivationFunction QAsymm8DepthwiseConvolution<
+  OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols
+>::get_activation_fn(
+  const ActivationFunction activation,
+  const QAsymm8Params& output_quant
+)
+{
+  if (
+    (activation == ActivationFunction::ReLU &&
+     output_quant.quantize(0) == 0) ||
+    (activation == ActivationFunction::ReLU6 &&
+     output_quant.quantize(0) == 0 &&
+     output_quant.dequantize(255) <= 6.0f)
+  )
+  {
+    // If the range of values which can be represented by a quantized value is
+    // within the range that would be produced by the activation function, then
+    // the activation function is redundant and can be skipped.
+    return ActivationFunction::None;
+  }
+  else if (
+    activation == ActivationFunction::ReLU6 &&
+    output_quant.dequantize(255) <= 6.0f
+  )
+  {
+    // If the largest value that can be represented by a quantized value is
+    // lower than the upper boundary, then the activation function can be
+    // relaxed to a ReLU.
+    return ActivationFunction::ReLU;
+  }
+
+  return activation;
+}
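+
+// For instance (illustrative): with an output offset of 0 and a scale of
+// 6/255, quantize(0) == 0 and dequantize(255) == 6, so a requested ReLU6 is
+// already implied by the representable range and is lowered to None above.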
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+uint8_t QAsymm8DepthwiseConvolution<
+  OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols
+>::_input_padding_value(void) const
+{
+  return _inputs_quant.offset;
+}
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+void QAsymm8DepthwiseConvolution<
+  OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols
+>::_pack_params(
+  void * const buffer,
+  const void * const weights,
+  const unsigned int weight_row_stride,
+  const unsigned int weight_col_stride,
+  const void * const biases
+) const
+{
+  const uint8_t *wptr = static_cast<const uint8_t *>(weights);
+  const int32_t *bptr = static_cast<const int32_t *>(biases);
+  uint8_t *outptr = static_cast<uint8_t *>(buffer);
+
+  // We set the vector length to use quad registers on AArch64 and only double
+  // registers on AArch32. NOTE For SVE set this to the actual vector length.
+#if defined(__aarch64__)
+  unsigned int veclen = 16;
+#else
+#if defined(__arm__)
+  unsigned int veclen = 8;
+#endif
+#endif
+
+  // Compute the rank 0 offset arising from the quantisation parameters.
+  const int32_t rank0_offset = (KernelRows * KernelCols *
+                                static_cast<int32_t>(_weights_quant.offset) *
+                                static_cast<int32_t>(_inputs_quant.offset));
+
+  // While there are channels left to process, pack a vector length of them at
+  // a time, reducing the vector length used as the number of remaining
+  // channels decreases.
+  for (
+    unsigned int n_channels = this->n_channels(); n_channels;
+    n_channels -= veclen,
+    outptr += veclen*(sizeof(int32_t) + this->kernel_rows*this->kernel_cols)
+  )
+  {
+    // NOTE Ignore this section if using SVE: the vector length remains the
+    // same and we just don't fill a full register for the tail.
+    while (n_channels < veclen)
+    {
+      // Reduce the vector length to either 8 or 1 (scalar)
+      // TODO Support more vector lengths in `execute_tile`.
+      veclen = (veclen == 16) ? 8 : 1;
+    }
+
+    // Get pointers to bias and weight portions of the output structure.
+    int32_t *out_bptr = reinterpret_cast<int32_t *>(outptr);
+    uint8_t *out_wptr = outptr + veclen*sizeof(int32_t);
+
+    // Copy a vector length of elements
+    for (unsigned int n = 0; n < veclen && n < n_channels; n++)
+    {
+      int32_t bias = (bptr != nullptr) ? *(bptr++) : 0;
+      uint32_t weight_sum = 0;
+
+      for (unsigned int i = 0; i < KernelRows; i++)
+      {
+        uint8_t *row_outptr = out_wptr + i*KernelCols*veclen;
+        for (unsigned int j = 0; j < KernelCols; j++)
+        {
+          uint8_t w = *(wptr + i*weight_row_stride + j*weight_col_stride);
+          row_outptr[j*veclen + n] = w;
+          weight_sum += static_cast<uint32_t>(w);
+        }
+      }
+      wptr++;
+
+      // Fold the quantisation offset contributions into the bias
+      int32_t rank1_offset = static_cast<int32_t>(
+        static_cast<uint32_t>(_inputs_quant.offset) * weight_sum
+      );
+      out_bptr[n] = bias + rank0_offset - rank1_offset;
+    }
+  }
+}
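+
+// The buffer produced above therefore holds, per group of `veclen` channels
+// (illustrative layout sketch):
+//   [ int32_t bias'[veclen] | uint8_t w(0,0)[veclen] | ... | w(KR-1,KC-1)[veclen] ]
+// where bias' already folds in the rank-0 and rank-1 offset corrections, so
+// execute_tile only has to subtract the weight offset times the per-tile sum
+// of input elements.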
+
+template <
+  unsigned int OutputTileRows, unsigned int OutputTileCols,
+  unsigned int KernelRows, unsigned int KernelCols,
+  unsigned int StrideRows, unsigned int StrideCols
+>
+template<ActivationFunction Activation>
+void QAsymm8DepthwiseConvolution<
+  OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols
+>::execute_tile(
+  int n_channels,
+  const void* packed_params,
+  const uint8_t* inptr,
+  const unsigned int in_row_stride,
+  const unsigned int in_col_stride,
+  uint8_t* outptr,
+  const unsigned int out_row_stride,
+  const unsigned int out_col_stride
+)
+{
+  // Activation parameters (unused if Activation is None)
+  const uint8_t aqmin = _output_quant.offset;
+  const uint8_t aqmax = (Activation == ActivationFunction::ReLU6) ?
+    std::min<uint8_t>(255u, _output_quant.quantize(6.0f)) : 255u;
+
+  // Byte type pointer to weights and biases
+  const uint8_t *wbptr = static_cast<const uint8_t *>(packed_params);
+
+#if defined(__aarch64__)  // Under Aarch64 only use quad registers
+  for (; n_channels >= 16; n_channels -= 16)
+  {
+    // Load biases
+    const int32x4_t biases[4] = {
+      vld1q_s32(reinterpret_cast<const int32_t *>(wbptr)),
+      vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 4),
+      vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 8),
+      vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 12)
+    };
+    wbptr += 16*sizeof(int32_t);
+
+    // Load weights
+    uint8x16_t weights[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
+    {
+      for (unsigned int j = 0; j < KernelCols; j++)
+      {
+        weights[i][j] = vld1q_u8(wbptr);
+        wbptr += 16;
+      }
+    }
+
+    // Load the input activations
+    uint8x16_t inputs[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (unsigned int i = 0; i < Base::inner_tile_rows; i++)
+    {
+      for (unsigned int j = 0; j < Base::inner_tile_cols; j++)
+      {
+        inputs[i][j] = vld1q_u8(inptr + i*in_row_stride + j*in_col_stride);
+      }
+    }
+    inptr += 16;
+
+    // Perform the convolution
+    for (unsigned int oi = 0; oi < OutputTileRows; oi++)
+    {
+      for (unsigned int oj = 0; oj < OutputTileCols; oj++)
+      {
+        // Two sets of operations are required: we perform the
+        // multiply-accumulates for the convolution proper, but must also sum
+        // the tile elements to account for the _weight_ offset.
+        uint32x4_t accs[4];
+        for (unsigned int i = 0; i < 4; i++)
+        {
+          accs[i] = reinterpret_cast<uint32x4_t>(biases[i]);
+        }
+
+        for (unsigned int wi = 0; wi < KernelRows; wi++)
+        {
+          for (unsigned int wj = 0; wj < KernelCols; wj++)
+          {
+            // Get relevant weight and activation pixel
+            const uint8x16_t w = weights[wi][wj];
+            const uint8x16_t x = inputs[oi*StrideRows + wi][oj*StrideCols + wj];
+
+            // Perform multiplication and accumulation
+            const uint16x8_t muls[2] = {
+              vmull_u8(vget_low_u8(w), vget_low_u8(x)),
+              vmull_u8(vget_high_u8(w), vget_high_u8(x))
+            };
+
+            const uint8x8_t woffset = vdup_n_u8(_weights_quant.offset);
+            const uint16x8_t sum_elems[2] = {
+              vmull_u8(vget_low_u8(x), woffset),
+              vmull_u8(vget_high_u8(x), woffset)
+            };
+
+            const uint32x4_t tmps[4] = {
+              vsubl_u16(vget_low_u16(muls[0]), vget_low_u16(sum_elems[0])),
+              vsubl_u16(vget_high_u16(muls[0]), vget_high_u16(sum_elems[0])),
+              vsubl_u16(vget_low_u16(muls[1]), vget_low_u16(sum_elems[1])),
+              vsubl_u16(vget_high_u16(muls[1]), vget_high_u16(sum_elems[1])),
+            };
+            for (unsigned int i = 0; i < 4; i++)
+            {
+              accs[i] = vaddq_u32(accs[i], tmps[i]);
+            }
+          }
+        }
+
+        // Rescale the accumulator and add in the new offset.
+        uint32x4_t final_accs[4];
+        for (unsigned int i = 0; i < 4; i++)
+        {
+#ifdef FIXED_POINT_REQUANTISATION
+          const int32x4_t y = rounding_divide_by_exp2(
+            saturating_doubling_high_mul(
+              reinterpret_cast<int32x4_t>(accs[i]), rescale_parameters.multiplier
+            ),
+            rescale_parameters.shift
+          );
+          const int32x4_t offset = reinterpret_cast<int32x4_t>(vdupq_n_u32(_output_quant.offset));
+          final_accs[i] = reinterpret_cast<uint32x4_t>(vmaxq_s32(vaddq_s32(y, offset), vdupq_n_s32(0)));
+#else  // floating point requantisation
+          float32x4_t fp_acc = vcvtq_f32_s32(reinterpret_cast<int32x4_t>(accs[i]));
+          fp_acc = vmulq_f32(fp_acc, vdupq_n_f32(rescale_parameters.rescale));
+          fp_acc = vaddq_f32(fp_acc, vdupq_n_f32(static_cast<float>(_output_quant.offset)));
+          fp_acc = vmaxq_f32(fp_acc, vdupq_n_f32(0.0f));
+          final_accs[i] = vcvtq_u32_f32(fp_acc);
+#endif
+        }
+
+        uint8x16_t output = vcombine_u8(
+          vqmovn_u16(vcombine_u16(vqmovn_u32(final_accs[0]), vqmovn_u32(final_accs[1]))),
+          vqmovn_u16(vcombine_u16(vqmovn_u32(final_accs[2]), vqmovn_u32(final_accs[3])))
+        );
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          output = vmaxq_u8(output, vdupq_n_u8(aqmin));
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          output = vminq_u8(output, vdupq_n_u8(aqmax));
+        }
+
+        vst1q_u8(outptr + oi*out_row_stride + oj*out_col_stride, output);
+      }
+    }
+    outptr += 16;
+  }
+#endif  // defined(__aarch64__)
+  for (; n_channels >= 8; n_channels -= 8)
+  {
+    const int32x4_t biases[2] = {
+      vld1q_s32(reinterpret_cast<const int32_t *>(wbptr)),
+      vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 4),
+    };
+    wbptr += 8*sizeof(int32_t);
+
+    uint8x8_t weights[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
+    {
+      for (unsigned int j = 0; j < KernelCols; j++)
+      {
+        weights[i][j] = vld1_u8(wbptr);
+        wbptr += 8;
+      }
+    }
+
+    uint8x8_t inputs[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (unsigned int i = 0; i < Base::inner_tile_rows; i++)
+    {
+      for (unsigned int j = 0; j < Base::inner_tile_cols; j++)
+      {
+        inputs[i][j] = vld1_u8(inptr + i*in_row_stride + j*in_col_stride);
+      }
+    }
+    inptr += 8;
+
+    for (unsigned int oi = 0; oi < OutputTileRows; oi++)
+    {
+      for (unsigned int oj = 0; oj < OutputTileCols; oj++)
+      {
+        uint32x4_t accs[2];
+        for (unsigned int i = 0; i < 2; i++)
+        {
+          accs[i] = reinterpret_cast<uint32x4_t>(biases[i]);
+        }
+
+        for (unsigned int wi = 0; wi < KernelRows; wi++)
+        {
+          for (unsigned int wj = 0; wj < KernelCols; wj++)
+          {
+            const uint8x8_t w = weights[wi][wj];
+            const uint8x8_t x = inputs[oi*StrideRows + wi][oj*StrideCols + wj];
+
+            const uint16x8_t muls = vmull_u8(w, x);
+            const uint8x8_t woffset = vdup_n_u8(_weights_quant.offset);
+            const uint16x8_t sum_elems = vmull_u8(x, woffset);
+
+            const uint32x4_t tmps[2] = {
+              vsubl_u16(vget_low_u16(muls), vget_low_u16(sum_elems)),
+              vsubl_u16(vget_high_u16(muls), vget_high_u16(sum_elems)),
+            };
+            for (unsigned int i = 0; i < 2; i++)
+            {
+              accs[i] = vaddq_u32(accs[i], tmps[i]);
+            }
+          }
+        }
+
+        uint32x4_t final_accs[2];
+        for (unsigned int i = 0; i < 2; i++)
+        {
+#ifdef FIXED_POINT_REQUANTISATION
+          const int32x4_t y = rounding_divide_by_exp2(
+            saturating_doubling_high_mul(
+              reinterpret_cast<int32x4_t>(accs[i]), rescale_parameters.multiplier
+            ),
+            rescale_parameters.shift
+          );
+          const int32x4_t offset = reinterpret_cast<int32x4_t>(vdupq_n_u32(_output_quant.offset));
+          final_accs[i] = reinterpret_cast<uint32x4_t>(vmaxq_s32(vaddq_s32(y, offset), vdupq_n_s32(0)));
+#else  // floating point requantisation
+          float32x4_t fp_acc = vcvtq_f32_s32(reinterpret_cast<int32x4_t>(accs[i]));
+          fp_acc = vmulq_f32(fp_acc, vdupq_n_f32(rescale_parameters.rescale));
+          fp_acc = vaddq_f32(fp_acc, vdupq_n_f32(static_cast<float>(_output_quant.offset)));
+          fp_acc = vmaxq_f32(fp_acc, vdupq_n_f32(0.0f));
+          final_accs[i] = vcvtq_u32_f32(fp_acc);
+#endif
+        }
+
+        uint8x8_t output = vqmovn_u16(vcombine_u16(vqmovn_u32(final_accs[0]), vqmovn_u32(final_accs[1])));
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          output = vmax_u8(output, vdup_n_u8(aqmin));
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          output = vmin_u8(output, vdup_n_u8(aqmax));
+        }
+
+        vst1_u8(outptr + oi*out_row_stride + oj*out_col_stride, output);
+      }
+    }
+    outptr += 8;
+  }
+  for (; n_channels; n_channels--)
+  {
+    // Load bias
+    const int32_t bias = *reinterpret_cast<const int32_t *>(wbptr);
+    wbptr += sizeof(int32_t);
+
+    // Load weights
+    uint8_t weights[KernelRows][KernelCols];
+    for (unsigned int i = 0; i < KernelRows; i++)
+    {
+      for (unsigned int j = 0; j < KernelCols; j++)
+      {
+        weights[i][j] = *(wbptr++);
+      }
+    }
+
+    // Load the input activations
+    uint8_t inputs[Base::inner_tile_rows][Base::inner_tile_cols];
+    for (unsigned int i = 0; i < Base::inner_tile_rows; i++)
+    {
+      for (unsigned int j = 0; j < Base::inner_tile_cols; j++)
+      {
+        inputs[i][j] = *(inptr + i*in_row_stride + j*in_col_stride);
+      }
+    }
+    inptr++;
+
+    // Perform the convolution
+    for (unsigned int oi = 0; oi < OutputTileRows; oi++)
+    {
+      for (unsigned int oj = 0; oj < OutputTileCols; oj++)
+      {
+        int32_t acc = bias;
+        uint32_t element_sum = 0;
+
+        for (unsigned int wi = 0; wi < KernelRows; wi++)
+        {
+          for (unsigned int wj = 0; wj < KernelCols; wj++)
+          {
+            const auto w = weights[wi][wj], x = inputs[oi*StrideRows + wi][oj*StrideCols + wj];
+            acc += static_cast<int32_t>(static_cast<uint32_t>(w) * static_cast<uint32_t>(x));
+            element_sum += static_cast<uint32_t>(x);
+          }
+        }
+
+        acc -= static_cast<int32_t>(element_sum) * static_cast<int32_t>(_weights_quant.offset);
+
+        // Requantise
+#ifdef FIXED_POINT_REQUANTISATION
+        acc = rounding_divide_by_exp2(
+            saturating_doubling_high_mul(acc, rescale_parameters.multiplier),
+            rescale_parameters.shift
+        );
+        acc += _output_quant.offset;
+        uint8_t output = clamp_to_limits<uint8_t>::clamp_and_cast<int32_t>(acc);
+#else  // floating point requantisation
+        float fp_acc = static_cast<float>(acc);
+        fp_acc *= rescale_parameters.rescale;
+        fp_acc += static_cast<float>(_output_quant.offset);
+        fp_acc = std::max<float>(fp_acc, 0.0f);
+        uint8_t output = static_cast<uint8_t>(std::min<int32_t>(static_cast<int32_t>(fp_acc), 255));
+#endif
+
+        // Apply the activation function
+        if (Activation == ActivationFunction::ReLU ||
+            Activation == ActivationFunction::ReLU6)
+        {
+          output = std::max(output, aqmin);
+        }
+        if (Activation == ActivationFunction::ReLU6)
+        {
+          output = std::min(output, aqmax);
+        }
+
+        *(outptr + oi*out_row_stride + oj*out_col_stride) = output;
+      }
+    }
+    outptr++;
+  }
+}
+
+}  // namespace depthwise
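For reference, the offset handling in the quantised kernel falls out of expanding the real-valued product: sum((w - w_off)(x - x_off)) = sum(w*x) - x_off*sum(w) - w_off*sum(x) + N*w_off*x_off. _pack_params folds the two input-independent terms into the packed bias; execute_tile adds the raw products and subtracts the remaining input-dependent term. A scalar reference of the same arithmetic, with illustrative names only:

    // Scalar reference (illustrative) of the accumulation used by execute_tile:
    //   packed_bias = bias + N*w_off*x_off - x_off*sum(w)   (done at pack time)
    //   acc         = packed_bias + sum(w*x) - w_off*sum(x) (done per tile)
    #include <cstdint>

    int32_t reference_accumulate(const uint8_t *w, const uint8_t *x, int n,
                                 int32_t packed_bias, int32_t w_off)
    {
      int32_t acc = packed_bias;
      uint32_t x_sum = 0;
      for (int k = 0; k < n; k++)
      {
        acc += static_cast<int32_t>(static_cast<uint32_t>(w[k]) * static_cast<uint32_t>(x[k]));
        x_sum += x[k];
      }
      return acc - static_cast<int32_t>(x_sum) * w_off;
    }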
diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_u8_s32.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_u8_s32.hpp
deleted file mode 100644
index d0d8de5..0000000
--- a/src/core/NEON/kernels/convolution/depthwise/impl_u8_s32.hpp
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- *
- *          NOTE: Header to be included by implementation files only.
- *
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- */
-
-#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp"
-
-#pragma once
-
-namespace depthwise
-{
-// Partial specialisation for U8 to S32
-template <int OutputTileRows, int OutputTileCols,
-        int KernelRows, int KernelCols,
-        int StrideRows, int StrideCols>
-struct DepthwiseConvolutionImpl<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols, uint8_t, int32_t>
-{
-    typedef DepthwiseConvolution<
-            OutputTileRows, OutputTileCols,
-            KernelRows, KernelCols,
-            StrideRows, StrideCols,
-            uint8_t, int32_t
-    > DWC;
-
-    template <
-            bool Specialize=false,  // Specialize (or not) the method
-            int InPadTop=0,         // If specialized, top padding
-            int InPadLeft=0,        // If specialized, left padding
-            int InPadBottom=0,      // If specialized, bottom padding
-            int InPadRight=0,       // If specialized, right padding
-            int OutPadBottom=0,     // If specialized, bottom output padding
-            int OutPadRight=0       // If specialized, bottom right padding
-    >
-    static void process_tile(
-            const int n_channels,
-            const uint8_t* const weights,
-            const int weight_row_stride,
-            const int weight_col_stride,
-            const uint8_t* const inptr,
-            const int in_row_stride,
-            const int in_col_stride,
-            int32_t* const outptr,
-            const int out_row_stride,
-            const int out_col_stride,
-            const int in_pad_top=0,
-            const int in_pad_left=0,
-            const int in_pad_bottom=0,
-            const int in_pad_right=0,
-            const int out_pad_bottom=0,
-            const int out_pad_right=0,
-            const int input_offset=0,
-            const int weights_offset=0);
-};
-
-
-template <int OTR, int OTC, int KR, int KC, int SR, int SC>
-template <
-        bool Specialize,
-        int InPadTop, int InPadLeft, int InPadBottom, int InPadRight,
-        int OutPadBottom, int OutPadRight
->
-void DepthwiseConvolutionImpl<OTR, OTC, KR, KC, SR, SC, uint8_t, int32_t>::process_tile(
-        const int n_channels,
-        const uint8_t *__restrict__ const weights,
-        const int weight_row_stride,
-        const int weight_col_stride,
-        const uint8_t *__restrict__ const inptr,
-        const int in_row_stride,
-        const int in_col_stride,
-        int32_t *__restrict__ const outptr,
-        const int out_row_stride,
-        const int out_col_stride,
-        const int _in_pad_top,
-        const int _in_pad_left,
-        const int _in_pad_bottom,
-        const int _in_pad_right,
-        const int _out_pad_bottom,
-        const int _out_pad_right,
-        const int _input_offset,
-        const int _weights_offset
-)
-{
-    constexpr auto inner_tile_rows = DWC::inner_tile_rows;
-    constexpr auto inner_tile_cols = DWC::inner_tile_cols;
-    constexpr auto kernel_rows = DWC::kernel_rows;
-    constexpr auto kernel_cols = DWC::kernel_cols;
-    constexpr auto output_tile_rows = DWC::output_tile_rows;
-    constexpr auto output_tile_cols = DWC::output_tile_cols;
-    constexpr auto stride_rows = DWC::stride_rows;
-    constexpr auto stride_cols = DWC::stride_cols;
-
-    // Extract parameters
-    const int in_pad_top = Specialize ? InPadTop : _in_pad_top;
-    const int in_pad_left = Specialize ? InPadLeft : _in_pad_left;
-    const int in_pad_bottom = Specialize ? InPadBottom : _in_pad_bottom;
-    const int in_pad_right = Specialize ? InPadRight : _in_pad_right;
-    const int out_pad_bottom = Specialize ? OutPadBottom : _out_pad_bottom;
-    const int out_pad_right = Specialize ? OutPadRight : _out_pad_right;
-
-    // Compute valid ranges of the tile
-    const int in_cells_i = inner_tile_rows - in_pad_bottom;
-    const int in_cells_j = inner_tile_cols - in_pad_right;
-    const int out_cells_i = output_tile_rows - out_pad_bottom;
-    const int out_cells_j = output_tile_cols - out_pad_right;
-
-    // Instantiate pointers
-    const uint8_t* __restrict__ inptr_base = inptr;
-    const uint8_t* __restrict__ wptr_base = weights;
-    int32_t* __restrict__ outptr_base = outptr;
-
-    // Perform the depthwise convolution
-    int channels_remaining = n_channels;
-#ifdef __aarch64__
-    const int32x4_t v_input_offset = vdupq_n_s32(_input_offset);
-    const int32x4_t v_weights_offset = vdupq_n_s32(_weights_offset);
-    for (; channels_remaining >= 16; channels_remaining -= 16)
-    {
-        // Load input tile
-        int32x4x4_t u[inner_tile_rows][inner_tile_cols];
-        for (int i = 0; i < inner_tile_rows; i++)
-        {
-            const uint8_t* const inptr_row = inptr_base + (i - in_pad_top)*in_row_stride;
-            for (int j = 0; j < inner_tile_cols; j++)
-            {
-                if (i < in_pad_top || in_cells_i <= i ||
-                    j < in_pad_left || in_cells_j <= j)
-                {
-                    u[i][j].val[0] = vdupq_n_s32(0);
-                    u[i][j].val[1] = vdupq_n_s32(0);
-                    u[i][j].val[2] = vdupq_n_s32(0);
-                    u[i][j].val[3] = vdupq_n_s32(0);
-                }
-                else
-                {
-                    const uint8x16_t uv = vld1q_u8(inptr_row + (j - in_pad_left)*in_col_stride);
-                    u[i][j].val[0] = vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(uv)))));
-                    u[i][j].val[1] = vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_low_u8(uv)))));
-                    u[i][j].val[2] = vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_high_u8(uv)))));
-                    u[i][j].val[3] = vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_high_u8(uv)))));
-                }
-            }
-        }
-        inptr_base += 16;
-
-        // Load weights tile
-        int32x4x4_t w[kernel_rows][kernel_cols];
-        for (int i = 0; i < kernel_rows; i++)
-        {
-            const uint8_t* const wptr_row = wptr_base + i*weight_row_stride;
-            for (int j = 0; j < kernel_cols; j++)
-            {
-                const uint8x16_t wv = vld1q_u8(wptr_row + j*weight_col_stride);
-                w[i][j].val[0] = vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(wv)))));
-                w[i][j].val[1] = vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_low_u8(wv)))));
-                w[i][j].val[2] = vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_high_u8(wv)))));
-                w[i][j].val[3] = vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_high_u8(wv)))));
-            }
-        }
-        wptr_base += 16;
-
-        // Perform the convolution
-        int32x4x4_t v[output_tile_rows][output_tile_cols];
-        for (int out_i = 0; out_i < out_cells_i; out_i++)
-        {
-            for (int out_j = 0; out_j < out_cells_j; out_j++)
-            {
-                // Base co-ordinate
-                const int base_i = out_i * stride_rows;
-                const int base_j = out_j * stride_cols;
-
-                // Fill the accumulator
-                for (int in_i = 0; in_i < kernel_rows; in_i++)
-                {
-                    const int i = base_i + in_i;
-                    for (int in_j = 0; in_j < kernel_cols; in_j++)
-                    {
-                        const int j = base_j + in_j;
-                        if (in_i == 0 && in_j == 0)
-                        {
-                            // v[out_i][out_j] = w[in_i][in_j] * u[i][j];
-                            v[out_i][out_j].val[0] = vmulq_s32(w[in_i][in_j].val[0], u[i][j].val[0]);
-                            v[out_i][out_j].val[1] = vmulq_s32(w[in_i][in_j].val[1], u[i][j].val[1]);
-                            v[out_i][out_j].val[2] = vmulq_s32(w[in_i][in_j].val[2], u[i][j].val[2]);
-                            v[out_i][out_j].val[3] = vmulq_s32(w[in_i][in_j].val[3], u[i][j].val[3]);
-                        }
-                        else
-                        {
-                            // v[out_i][out_j] += w[in_i][in_j] * u[i][j];
-                            v[out_i][out_j].val[0] = vmlaq_s32(v[out_i][out_j].val[0], w[in_i][in_j].val[0], u[i][j].val[0]);
-                            v[out_i][out_j].val[1] = vmlaq_s32(v[out_i][out_j].val[1], w[in_i][in_j].val[1], u[i][j].val[1]);
-                            v[out_i][out_j].val[2] = vmlaq_s32(v[out_i][out_j].val[2], w[in_i][in_j].val[2], u[i][j].val[2]);
-                            v[out_i][out_j].val[3] = vmlaq_s32(v[out_i][out_j].val[3], w[in_i][in_j].val[3], u[i][j].val[3]);
-                        }
-                    }
-                }
-            }
-        }
-
-        // Store the output tile
-        for (int i = 0; i < out_cells_i; i++)
-        {
-            int32_t* const outptr_row = outptr_base + i*out_row_stride;
-            for (int j = 0; j < out_cells_j; j++)
-            {
-                vst1q_s32(outptr_row + j*out_col_stride, v[i][j].val[0]);
-                vst1q_s32(outptr_row + j*out_col_stride + 4, v[i][j].val[1]);
-                vst1q_s32(outptr_row + j*out_col_stride + 8, v[i][j].val[2]);
-                vst1q_s32(outptr_row + j*out_col_stride + 12, v[i][j].val[3]);
-            }
-        }
-        outptr_base += 16;
-    }
-#endif  // __aarch64__
-    for (; channels_remaining; channels_remaining--)
-    {
-        // Load input tile
-        int32_t u[inner_tile_rows][inner_tile_cols];
-        for (int i = 0; i < inner_tile_rows; i++)
-        {
-            const uint8_t* const inptr_row = inptr_base + (i - in_pad_top)*in_row_stride;
-            for (int j = 0; j < inner_tile_cols; j++)
-            {
-                if (i < in_pad_top || in_cells_i <= i ||
-                    j < in_pad_left || in_cells_j <= j)
-                {
-                    u[i][j] = static_cast<uint8_t>(0);
-                }
-                else
-                {
-                    u[i][j] = static_cast<int32_t >(*(inptr_row + (j - in_pad_left)*in_col_stride)) + _input_offset;
-                }
-            }
-        }
-        inptr_base++;
-
-        // Load weights tile
-        int32_t w[kernel_rows][kernel_cols];
-        for (int i = 0; i < kernel_rows; i++)
-        {
-            const uint8_t* const wptr_row = wptr_base + i*weight_row_stride;
-            for (int j = 0; j < kernel_cols; j++)
-            {
-                w[i][j] = static_cast<int32_t >(*(wptr_row + j*weight_col_stride)) + _weights_offset;
-            }
-        }
-        wptr_base++;
-
-        // Perform the convolution
-        int32_t v[output_tile_rows][output_tile_cols];
-        for (int out_i = 0; out_i < out_cells_i; out_i++)
-        {
-            for (int out_j = 0; out_j < out_cells_j; out_j++)
-            {
-                // Clear the accumulator
-                v[out_i][out_j] = static_cast<int32_t>(0);
-
-                // Base co-ordinate
-                const int base_i = out_i * stride_rows;
-                const int base_j = out_j * stride_cols;
-
-                // Fill the accumulator
-                for (int in_i = 0; in_i < kernel_rows; in_i++)
-                {
-                    const int i = base_i + in_i;
-                    for (int in_j = 0; in_j < kernel_cols; in_j++)
-                    {
-                        const int j = base_j + in_j;
-                        v[out_i][out_j] += w[in_i][in_j] * u[i][j];
-                    }
-                }
-            }
-        }
-
-        // Store the output tile
-        for (int i = 0; i < out_cells_i; i++)
-        {
-            int32_t* const outptr_row = outptr_base + i*out_row_stride;
-            for (int j = 0; j < out_cells_j; j++)
-            {
-                *(outptr_row + j*out_col_stride) = v[i][j];
-            }
-        }
-        outptr_base++;
-    }
-}
-
-}  // namespace depthwise
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index f0fd4cf..5db94a6 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -31,112 +31,78 @@
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "support/ToolchainSupport.h"
 
-using namespace arm_compute;
+#include "arm_compute/core/utils/misc/InfoHelpers.h"
+
 using namespace arm_compute::misc;
 using namespace arm_compute::misc::shape_calculator;
 
-NEDepthwiseConvolutionLayer3x3::NEDepthwiseConvolutionLayer3x3()
-    : _dwc_kernel(), _output_stage_kernel(), _border_handler(), _permute_input(), _permute_weights(), _permute_output(), _activationlayer_function(), _accumulator(), _permuted_input(),
-      _permuted_weights(), _permuted_output(), _has_bias(false), _is_quantized(false), _is_optimized(false), _are_weights_reshaped(false), _is_nchw(true), _is_first_run(true), _permute(false),
-      _is_activationlayer_enabled(false)
+namespace arm_compute
+{
+NEDepthwiseConvolutionLayer3x3::NEDepthwiseConvolutionLayer3x3(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(memory_manager), _dwc_kernel(), _dwc_optimized_func(memory_manager), _output_stage_kernel(), _border_handler(), _permute_input(), _permute_weights(), _permute_output(),
+      _activationlayer_function(), _accumulator(), _permuted_input(), _permuted_weights(), _permuted_output(), _original_weights(nullptr), _has_bias(false), _is_quantized(false), _is_optimized(false),
+      _is_nchw(true), _permute(false), _is_activationlayer_enabled(false), _is_prepared(false)
 {
 }
 
-void NEDepthwiseConvolutionLayer3x3::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
-                                               unsigned int depth_multiplier, const ActivationLayerInfo &act_info)
+void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor                   *input,
+                                                       const ITensor             *weights,
+                                                       const ITensor             *biases,
+                                                       ITensor                   *output,
+                                                       const PadStrideInfo       &conv_info,
+                                                       unsigned int               depth_multiplier,
+                                                       const ActivationLayerInfo &act_info)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+    ARM_COMPUTE_UNUSED(act_info);
 
     PixelValue zero_value(0.f);
 
-    _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-    _has_bias     = biases != nullptr;
-    _is_optimized = NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(input->info()->tensor_shape(),
-                                                                                          conv_info,
-                                                                                          input->info()->data_type(),
-                                                                                          depth_multiplier,
-                                                                                          input->info()->data_layout());
-    _are_weights_reshaped = false;
-    _is_nchw              = input->info()->data_layout() == DataLayout::NCHW;
-    _permute              = _is_optimized == _is_nchw;
-
     // Initialize the intermediate accumulator tensor in case of quantized input
     if(_is_quantized)
     {
         TensorShape accum_shape  = output->info()->tensor_shape();
         DataLayout  accum_layout = output->info()->data_layout();
-        if(!_is_optimized && !_is_nchw)
+        if(!_is_nchw)
         {
             permute(accum_shape, PermutationVector(1U, 2U, 0U));
             accum_layout = DataLayout::NCHW;
         }
 
+        _memory_group.manage(&_accumulator);
         _accumulator.allocator()->init(TensorInfo(accum_shape, 1, DataType::S32, output->info()->quantization_info()));
         _accumulator.info()->set_data_layout(accum_layout);
         zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().offset));
     }
 
-    if(_is_optimized)
+    if(!_is_nchw)
     {
-        ITensor *optimized_output = (_is_quantized) ? &_accumulator : output;
-        if(_is_nchw)
-        {
-            // Configure the function to transform the input tensor from NCHW -> NHWC
-            _permute_input.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
-            _permuted_input.info()->set_data_layout(DataLayout::NHWC);
+        _memory_group.manage(&_permuted_input);
+        _memory_group.manage(&_permuted_output);
 
-            // Configure the function to transform the weights tensor from IHW -> HWI
-            _permute_weights.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
-            _permuted_weights.info()->set_data_layout(DataLayout::NHWC);
+        // Configure the function to transform the input tensor from NHWC -> NCHW
+        _permute_input.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
+        _permuted_input.info()->set_data_layout(DataLayout::NCHW);
 
-            // Configure optimized depthwise
-            _dwc_kernel.configure(&_permuted_input, &_permuted_weights, &_permuted_output, conv_info, depth_multiplier, DataLayout::NHWC);
+        // Configure the function to transform the weights tensor from HWI -> IHW
+        _permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
+        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
 
-            // Configure the function to transform the convoluted output to ACL's native ordering format NCHW
-            _permuted_output.info()->set_data_layout(DataLayout::NHWC);
-            _permute_output.configure(&_permuted_output, optimized_output, PermutationVector(1U, 2U, 0U));
+        // Configure optimized depthwise
+        _dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier);
 
-            // Allocate tensors
-            _permuted_input.allocator()->allocate();
-            _permuted_weights.allocator()->allocate();
-            _permuted_output.allocator()->allocate();
-        }
-        else
-        {
-            _dwc_kernel.configure(input, weights, optimized_output, conv_info, depth_multiplier, DataLayout::NHWC);
-        }
+        // Configure border handler
+        _border_handler.configure(&_permuted_input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
+
+        // Allocate tensors
+        _permuted_input.allocator()->allocate();
     }
     else
     {
-        if(!_is_nchw)
-        {
-            // Configure the function to transform the input tensor from NHWC -> NCHW
-            _permute_input.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
-            _permuted_input.info()->set_data_layout(DataLayout::NCHW);
+        // Configure depthwise convolution kernel
+        _dwc_kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info, depth_multiplier);
 
-            // Configure the function to transform the weights tensor from HWI -> IHW
-            _permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
-            _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
-
-            // Configure optimized depthwise
-            _dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier);
-
-            // Configure border handler
-            _border_handler.configure(&_permuted_input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
-
-            // Allocate tensors
-            _permuted_input.allocator()->allocate();
-            _permuted_weights.allocator()->allocate();
-        }
-        else
-        {
-            // Configure depthwise convolution kernel
-            _dwc_kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info, depth_multiplier);
-
-            // Configure border handler
-            _border_handler.configure(input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
-        }
+        // Configure border handler
+        _border_handler.configure(input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
     }
 
     // Configure biases accumulation
@@ -147,32 +113,116 @@
         float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
         int   output_multiplier, output_shift;
         quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-        _output_stage_kernel.configure(&_accumulator, biases, (_is_nchw || _is_optimized) ? output : &_permuted_output, output_multiplier, output_shift, output_quant_info.offset);
+        _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, output_quant_info.offset);
         _accumulator.allocator()->allocate();
     }
     else if(_has_bias)
     {
-        _output_stage_kernel.configure((_is_nchw || _is_optimized) ? output : &_permuted_output, biases);
+        _output_stage_kernel.configure(_is_nchw ? output : &_permuted_output, biases);
     }
 
-    if(!_is_optimized && !_is_nchw)
+    // Permute output
+    if(!_is_nchw)
     {
         // Configure the function to transform the convolved output to NHWC
         _permute_output.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U));
         _permuted_output.allocator()->allocate();
     }
+}
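
The generic path pairs two inverse permutations: (1U, 2U, 0U) takes NHWC to NCHW on the way in, and (2U, 0U, 1U) restores NHWC on the way out. A minimal sketch of the reordering these vectors imply, assuming the out[i] = in[perm[i]] convention of arm_compute::permute and ACL's NHWC storage order [C, W, H] in dimensions 0..2:

    #include <array>
    #include <cstdio>

    // Sketch of the dimension reordering assumed above: out[i] = in[perm[i]].
    std::array<int, 3> permute3(const std::array<int, 3> &in, const std::array<int, 3> &perm)
    {
        return { in[perm[0]], in[perm[1]], in[perm[2]] };
    }

    int main()
    {
        const std::array<int, 3> nhwc = { 64, 224, 224 };  // [C, W, H]
        const auto nchw = permute3(nhwc, { 1, 2, 0 });     // -> [W, H, C], i.e. NCHW
        const auto back = permute3(nchw, { 2, 0, 1 });     // -> [C, W, H], i.e. NHWC again
        std::printf("NCHW: %d %d %d / restored NHWC: %d %d %d\n",
                    nchw[0], nchw[1], nchw[2], back[0], back[1], back[2]);
        return 0;
    }
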
 
-    //Configure Activation Layer
+void NEDepthwiseConvolutionLayer3x3::configure_optimized(const ITensor             *input,
+                                                         const ITensor             *weights,
+                                                         const ITensor             *biases,
+                                                         ITensor                   *output,
+                                                         const PadStrideInfo       &conv_info,
+                                                         unsigned int               depth_multiplier,
+                                                         const ActivationLayerInfo &act_info)
+{
+    ActivationLayerInfo act_info_to_use = ActivationLayerInfo();
+    const bool          is_relu         = arm_compute::utils::info_helpers::is_relu(act_info);
+    const bool          is_relu6        = arm_compute::utils::info_helpers::is_relu6(act_info);
+    _is_activationlayer_enabled         = act_info.enabled() && !(is_relu || is_relu6);
+    if(!_is_activationlayer_enabled)
+    {
+        act_info_to_use = act_info;
+    }
+
+    if(_is_nchw)
+    {
+        _memory_group.manage(&_permuted_input);
+        _memory_group.manage(&_permuted_output);
+
+        // Configure the function to transform the input tensor from NCHW -> NHWC
+        _permute_input.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
+        _permuted_input.info()->set_data_layout(DataLayout::NHWC);
+
+        // Configure the function to transform the weights tensor from IHW -> HWI
+        _permute_weights.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
+        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);
+
+        // Configure optimized depthwise
+        _dwc_optimized_func.configure(&_permuted_input, &_permuted_weights, biases, &_permuted_output, conv_info, depth_multiplier, act_info_to_use);
+
+        // Configure the function to transform the convolved output to ACL's native ordering format NCHW
+        _permuted_output.info()->set_data_layout(DataLayout::NHWC);
+        _permute_output.configure(&_permuted_output, output, PermutationVector(1U, 2U, 0U));
+
+        // Allocate tensors
+        _permuted_input.allocator()->allocate();
+        _permuted_output.allocator()->allocate();
+    }
+    else
+    {
+        _dwc_optimized_func.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info_to_use);
+    }
+}
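
Note that configure_optimized() only forwards the activation to the assembly kernel when it is a ReLU or a ReLU6; any other enabled activation leaves act_info_to_use empty and runs as a separate layer afterwards. A hedged sketch of the kind of predicate utils::info_helpers::is_relu/is_relu6 presumably implement (the real helpers live in InfoHelpers.h):

    #include "arm_compute/core/Types.h"

    // Sketch (assumption) of the fusing predicate used above: only plain ReLU
    // and a bounded ReLU capped at 6 can be folded into the assembly kernel.
    bool can_fuse_into_assembly(const arm_compute::ActivationLayerInfo &act_info)
    {
        using Act = arm_compute::ActivationLayerInfo::ActivationFunction;
        if(!act_info.enabled())
        {
            return true; // nothing to fuse
        }
        const bool is_relu  = act_info.activation() == Act::RELU;
        const bool is_relu6 = act_info.activation() == Act::BOUNDED_RELU && act_info.a() == 6.f;
        return is_relu || is_relu6;
    }
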
+
+void NEDepthwiseConvolutionLayer3x3::configure(ITensor                   *input,
+                                               const ITensor             *weights,
+                                               const ITensor             *biases,
+                                               ITensor                   *output,
+                                               const PadStrideInfo       &conv_info,
+                                               unsigned int               depth_multiplier,
+                                               const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+
+    _original_weights = weights;
+    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
+    _has_bias         = biases != nullptr;
+    _is_optimized     = NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input->info(),
+                                                                                       weights->info(),
+                                                                                       conv_info,
+                                                                                       depth_multiplier);
+    _is_nchw                    = input->info()->data_layout() == DataLayout::NCHW;
+    _permute                    = _is_optimized == _is_nchw;
+    _is_prepared                = false;
     _is_activationlayer_enabled = act_info.enabled();
 
+    // Configure appropriate pipeline
+    if(_is_optimized)
+    {
+        configure_optimized(input, weights, biases, output, conv_info, depth_multiplier, act_info);
+    }
+    else
+    {
+        configure_generic(input, weights, biases, output, conv_info, depth_multiplier, act_info);
+    }
+
+    // Configure activation
     if(_is_activationlayer_enabled)
     {
         _activationlayer_function.configure(output, nullptr, act_info);
     }
 }
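
The single _permute flag set in configure() covers all four layout/path combinations: the optimized kernel is natively NHWC and the generic kernel natively NCHW, so a layout conversion is needed exactly when the chosen kernel's native layout differs from the tensors' layout, i.e. when _is_optimized == _is_nchw. A tiny sketch enumerating the truth table:

    #include <cstdio>

    // Enumerates the _permute = (_is_optimized == _is_nchw) decision: permute
    // whenever the selected kernel's native layout differs from the data's.
    int main()
    {
        for(const bool is_optimized : { false, true })
        {
            for(const bool is_nchw : { false, true })
            {
                std::printf("optimized=%d (native %s) nchw=%d -> permute=%d\n",
                            is_optimized, is_optimized ? "NHWC" : "NCHW",
                            is_nchw, is_optimized == is_nchw);
            }
        }
        return 0;
    }
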
 
-Status NEDepthwiseConvolutionLayer3x3::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
-                                                unsigned int depth_multiplier, const ActivationLayerInfo &act_info)
+Status NEDepthwiseConvolutionLayer3x3::validate(const ITensorInfo         *input,
+                                                const ITensorInfo         *weights,
+                                                const ITensorInfo         *biases,
+                                                const ITensorInfo         *output,
+                                                const PadStrideInfo       &conv_info,
+                                                unsigned int               depth_multiplier,
+                                                const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
@@ -184,14 +234,20 @@
         ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(channel_idx));
     }
 
-    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
-    TensorInfo accumulator  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
-
-    ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionLayer3x3Kernel::validate(input, weights, is_quantized ? &accumulator : output, conv_info, depth_multiplier));
-
-    if(is_quantized)
+    if(!NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input, weights, conv_info, depth_multiplier))
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, biases, output));
+        const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+        TensorInfo accumulator  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
+        ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionLayer3x3Kernel::validate(input, weights, is_quantized ? &accumulator : output, conv_info, depth_multiplier));
+
+        if(is_quantized)
+        {
+            ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, biases, output));
+        }
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionAssemblyDispatch::validate(input, weights, biases, output, conv_info, depth_multiplier));
     }
 
     // Validate Activation Layer
@@ -203,43 +259,14 @@
     return Status{};
 }
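
As with the other NEON functions, validate() mirrors configure() and operates on bare ITensorInfo objects, so a caller can test a configuration before allocating anything. A hypothetical dry-run helper:

    #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"

    using namespace arm_compute;

    // Hypothetical caller: probe whether a 3x3 depthwise configuration is
    // valid using tensor metadata only (stride 1, pad 1 assumed).
    Status try_dwc3x3(const ITensorInfo &in, const ITensorInfo &w, const ITensorInfo &b, const ITensorInfo &out)
    {
        const PadStrideInfo conv_info(1, 1, 1, 1);
        return NEDepthwiseConvolutionLayer3x3::validate(&in, &w, &b, &out, conv_info,
                                                        1 /* depth_multiplier */, ActivationLayerInfo());
    }
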
 
-void NEDepthwiseConvolutionLayer3x3::run()
+void NEDepthwiseConvolutionLayer3x3::run_generic()
 {
-    if(_is_first_run && _is_optimized)
-    {
-        _is_first_run = false;
-        // Create convolver (deferred)
-        _dwc_kernel.generate_convolver();
-    }
-
-    // Permute weights
-    if(_permute)
-    {
-        if(!_are_weights_reshaped)
-        {
-            _are_weights_reshaped = true;
-            _permute_weights.run();
-        }
-
-        _permute_input.run();
-    }
-
-    // Handle input
-    if(!_is_optimized)
-    {
-        // Fill border
-        NEScheduler::get().schedule(&_border_handler, Window::DimX);
-    }
+    // Fill border
+    NEScheduler::get().schedule(&_border_handler, Window::DimX);
 
     // Execute depthwise convolution
     NEScheduler::get().schedule(&_dwc_kernel, Window::DimX);
 
-    // Permute output
-    if(_is_optimized && _is_nchw)
-    {
-        _permute_output.run();
-    }
-
     // Add biases
     if(_has_bias || _is_quantized)
     {
@@ -247,15 +274,71 @@
     }
 
     // Permute output
-    if(!_is_optimized && !_is_nchw)
+    if(!_is_nchw)
     {
         _permute_output.run();
     }
+}
 
+void NEDepthwiseConvolutionLayer3x3::run_optimized()
+{
+    // Run assembly function
+    _dwc_optimized_func.run();
+
+    // Permute output
+    if(_is_nchw)
+    {
+        _permute_output.run();
+    }
+}
+
+void NEDepthwiseConvolutionLayer3x3::run()
+{
+    prepare();
+
+    _memory_group.acquire();
+
+    // Permute input
+    if(_permute)
+    {
+        _permute_input.run();
+    }
+
+    _is_optimized ? run_optimized() : run_generic();
+
+    // Run activation
     if(_is_activationlayer_enabled)
     {
         _activationlayer_function.run();
     }
+
+    _memory_group.release();
+}
+
+void NEDepthwiseConvolutionLayer3x3::prepare()
+{
+    if(!_is_prepared)
+    {
+        // Permute weights
+        if(_permute)
+        {
+            _permuted_weights.allocator()->allocate();
+            _permute_weights.run();
+            _original_weights->mark_as_unused();
+        }
+
+        // Prepare optimized function
+        if(_is_optimized)
+        {
+            _dwc_optimized_func.prepare();
+            if(!_permuted_weights.is_used())
+            {
+                _permuted_weights.allocator()->free();
+            }
+        }
+
+        _is_prepared = true;
+    }
 }
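
prepare() is idempotent and is invoked from run(), so weight permutation and, on the optimized path, assembly-side packing happen exactly once; afterwards the original weights are marked unused and the intermediate permuted weights can be freed. A hedged usage sketch:

    #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"

    // Hypothetical inference loop: configure once, run many times. The first
    // run() triggers prepare(); subsequent prepare() calls are no-ops.
    void infer_many(arm_compute::NEDepthwiseConvolutionLayer3x3 &dwc, int iterations)
    {
        dwc.prepare(); // optional: pay the one-off weight-reshaping cost up front
        for(int i = 0; i < iterations; ++i)
        {
            dwc.run();
        }
    }
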
 
 NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer()
@@ -542,3 +625,4 @@
         _is_prepared = true;
     }
 }
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
new file mode 100644
index 0000000..a644265
--- /dev/null
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h"
+
+#include "arm_compute/core/CPP/Validate.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/misc/InfoHelpers.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
+namespace
+{
+std::unique_ptr<depthwise::IDepthwiseConvolution> create_convolver(const ITensor      *input,
+                                                                   const ITensor      *weights,
+                                                                   ITensor            *output,
+                                                                   PadStrideInfo       conv_info,
+                                                                   ActivationLayerInfo act_info)
+{
+    const DataType    data_type = input->info()->data_type();
+    const TensorShape shape     = input->info()->tensor_shape();
+
+    const int n_batches      = shape[3];
+    const int in_rows        = shape.z();
+    const int in_cols        = shape.y();
+    const int n_channels     = shape.x();
+    const int padding_top    = conv_info.pad_top();
+    const int padding_left   = conv_info.pad_left();
+    const int padding_bottom = conv_info.pad_bottom();
+    const int padding_right  = conv_info.pad_right();
+
+    const unsigned int stride_x = conv_info.stride().first;
+
+    // Map activation function
+    neon_convolution_kernels::ActivationFunction activation = neon_convolution_kernels::ActivationFunction::None;
+    if(arm_compute::utils::info_helpers::is_relu(act_info))
+    {
+        activation = neon_convolution_kernels::ActivationFunction::ReLU;
+    }
+    else if(arm_compute::utils::info_helpers::is_relu6(act_info))
+    {
+        activation = neon_convolution_kernels::ActivationFunction::ReLU6;
+    }
+
+    // Create quantized convolver
+    if(data_type == DataType::QASYMM8)
+    {
+        const QuantizationInfo &input_qinfo   = input->info()->quantization_info();
+        const QuantizationInfo &weights_qinfo = weights->info()->quantization_info();
+        const QuantizationInfo &output_qinfo  = output->info()->quantization_info();
+
+        // Check that the quantization offsets are in the range [0, 255]
+        ARM_COMPUTE_ERROR_ON(input_qinfo.offset < 0 || input_qinfo.offset > 255);
+        ARM_COMPUTE_ERROR_ON(weights_qinfo.offset < 0 || weights_qinfo.offset > 255);
+        ARM_COMPUTE_ERROR_ON(output_qinfo.offset < 0 || output_qinfo.offset > 255);
+        const qasymm8::QAsymm8Params iqinfo{ static_cast<uint8_t>(input_qinfo.offset), input_qinfo.scale };
+        const qasymm8::QAsymm8Params wqinfo{ static_cast<uint8_t>(weights_qinfo.offset), weights_qinfo.scale };
+        const qasymm8::QAsymm8Params oqinfo{ static_cast<uint8_t>(output_qinfo.offset), output_qinfo.scale };
+
+        // Calculate rescale parameters
+        const float fmultiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
+        int         qmultiplier = 0;
+        int         qshift      = 0;
+        quantization::calculate_quantized_multiplier_less_than_one(fmultiplier, &qmultiplier, &qshift);
+        qasymm8::QAsymm8RescaleParams rescale_params(qshift, qmultiplier, fmultiplier);
+
+        // Create convolver
+        switch(stride_x)
+        {
+            case 1:
+                return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
+                           n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
+            case 2:
+                return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
+                           n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
+            default:
+                return nullptr;
+        }
+    }
+    else
+    {
+        // Create float convolver
+        switch(data_type)
+        {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+            case DataType::F16:
+            {
+                switch(stride_x)
+                {
+                    case 1:
+                        return arm_compute::support::cpp14::make_unique<depthwise::DepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>>(
+                                   n_batches, in_rows, in_cols, n_channels, activation, padding_top, padding_left, padding_bottom, padding_right);
+                    case 2:
+                        return arm_compute::support::cpp14::make_unique<depthwise::DepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>>(
+                                   n_batches, in_rows, in_cols, n_channels, activation, padding_top, padding_left, padding_bottom, padding_right);
+                    default:
+                        return nullptr;
+                }
+                break;
+            }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+            case DataType::F32:
+            {
+                switch(stride_x)
+                {
+                    case 1:
+                        return arm_compute::support::cpp14::make_unique<depthwise::DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>>(
+                                   n_batches, in_rows, in_cols, n_channels, activation, padding_top, padding_left, padding_bottom, padding_right);
+                    case 2:
+                        return arm_compute::support::cpp14::make_unique<depthwise::DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>>(
+                                   n_batches, in_rows, in_cols, n_channels, activation, padding_top, padding_left, padding_bottom, padding_right);
+                    default:
+                        return nullptr;
+                }
+                break;
+            }
+            default:
+                return nullptr;
+        }
+    }
+}
+} // namespace
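
create_convolver() turns the runtime stride into compile-time template parameters so the depthwise inner loops can be specialised per stride, returning nullptr for anything unsupported. The same pattern in miniature (hypothetical kernel type, not the depthwise::* classes):

    #include <memory>

    // Miniature of the runtime-to-compile-time stride dispatch used above.
    struct IKernel
    {
        virtual ~IKernel() = default;
        virtual void run() = 0;
    };

    template <unsigned int StrideX, unsigned int StrideY>
    struct Kernel final : IKernel
    {
        void run() override { /* StrideX/StrideY are compile-time constants here */ }
    };

    std::unique_ptr<IKernel> make_kernel(unsigned int stride)
    {
        switch(stride)
        {
            case 1:
                return std::make_unique<Kernel<1, 1>>();
            case 2:
                return std::make_unique<Kernel<2, 2>>();
            default:
                return nullptr; // caller falls back to the generic path
        }
    }
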
+
+#ifndef DOXYGEN_SKIP_THIS
+NEDepthwiseConvolutionAssemblyDispatch::NEDepthwiseConvolutionAssemblyDispatch(std::shared_ptr<arm_compute::IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr), _packed_weights(), _workspace(), _is_prepared(false), _dwc_assembly_kernel(nullptr),
+      _dwc_acl_kernel()
+{
+}
+#endif /* DOXYGEN_SKIP_THIS */
+
+void NEDepthwiseConvolutionAssemblyDispatch::configure(const ITensor             *input,
+                                                       const ITensor             *weights,
+                                                       const ITensor             *bias,
+                                                       ITensor                   *output,
+                                                       const PadStrideInfo       &conv_info,
+                                                       unsigned int               depth_multiplier,
+                                                       const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+    ARM_COMPUTE_UNUSED(depth_multiplier);
+    ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionAssemblyDispatch::validate(input->info(),
+                                                                                weights->info(),
+                                                                                bias != nullptr ? bias->info() : nullptr,
+                                                                                output->info(),
+                                                                                conv_info,
+                                                                                depth_multiplier,
+                                                                                act_info));
+
+    // Output auto-initialization if not yet initialized
+    const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier);
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape));
+
+    _input       = input;
+    _weights     = weights;
+    _bias        = bias;
+    _output      = output;
+    _is_prepared = false;
+
+    // Create convolver
+    _dwc_assembly_kernel = create_convolver(input, weights, output, conv_info, act_info);
+    ARM_COMPUTE_ERROR_ON(_dwc_assembly_kernel == nullptr);
+
+    // Create assembly kernel wrapper
+    _dwc_acl_kernel.configure(_dwc_assembly_kernel.get());
+
+    constexpr size_t alignment = 128;
+
+    // Create workspace
+    const unsigned int num_threads    = NEScheduler::get().num_threads();
+    const size_t       workspace_size = _dwc_assembly_kernel->get_working_space_size(num_threads);
+    ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "Workspace size cannot be 0!");
+    _workspace.allocator()->init(TensorInfo(TensorShape{ workspace_size }, 1, DataType::S8), alignment);
+    _memory_group.manage(&_workspace);
+    _workspace.allocator()->allocate();
+
+    // Create packing tensor
+    const size_t pack_tensor_size = _dwc_assembly_kernel->get_packed_params_size();
+    ARM_COMPUTE_ERROR_ON_MSG(pack_tensor_size == 0, "Pack tensor size cannot be 0!");
+    _packed_weights.allocator()->init(TensorInfo(TensorShape{ pack_tensor_size }, 1, DataType::S8), alignment);
+}
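
Both scratch tensors configured above are plain 1-D S8 buffers; the only hard requirement is the 128-byte alignment passed to the allocator, with the workspace sized per scheduler thread. A standalone sketch of an equivalent aligned allocation (C++17, not the ACL allocator):

    #include <cstdio>
    #include <cstdlib>

    int main()
    {
        const std::size_t  per_thread  = 4096; // hypothetical per-thread working space
        const unsigned int num_threads = 4;

        std::size_t size = per_thread * num_threads;
        size = (size + 127) / 128 * 128;                 // round up to the alignment
        void *workspace = std::aligned_alloc(128, size); // 128-byte alignment, as in configure()
        std::printf("workspace %p, %zu bytes\n", workspace, size);
        std::free(workspace);
        return 0;
    }
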
+
+Status NEDepthwiseConvolutionAssemblyDispatch::validate(const ITensorInfo         *input,
+                                                        const ITensorInfo         *weights,
+                                                        const ITensorInfo         *bias,
+                                                        const ITensorInfo         *output,
+                                                        const PadStrideInfo       &conv_info,
+                                                        unsigned int               depth_multiplier,
+                                                        const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
+
+    const auto       strides     = conv_info.stride();
+    const DataLayout data_layout = input->data_layout();
+    unsigned int     width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    unsigned int     height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) != 3 || weights->dimension(height_idx) != 3);
+    ARM_COMPUTE_RETURN_ERROR_ON(!((strides.first == strides.second) && ((strides.first == 1) || (strides.first == 2))));
+    ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier != 1);
+
+    const bool is_relu  = arm_compute::utils::info_helpers::is_relu(act_info);
+    const bool is_relu6 = arm_compute::utils::info_helpers::is_relu6(act_info);
+    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !(is_relu || is_relu6));
+
+    // Check bias
+    if(bias != nullptr)
+    {
+        unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+        ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights->dimension(channel_idx));
+    }
+
+    // Check output
+    if(output->total_size() != 0)
+    {
+        const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+
+    return Status{};
+}
+
+bool NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(const ITensorInfo *input,
+                                                                    const ITensorInfo *weights,
+                                                                    PadStrideInfo      conv_info,
+                                                                    unsigned int       depth_multiplier)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
+
+    // Reshape input shape if in NHWC format
+    const DataLayout data_layout = input->data_layout();
+    TensorShape      in_shape{ input->tensor_shape() };
+    if(data_layout == DataLayout::NHWC)
+    {
+        in_shape.set(Window::DimX, input->tensor_shape().y());
+        in_shape.set(Window::DimY, input->tensor_shape().z());
+        in_shape.set(Window::DimZ, input->tensor_shape().x());
+    }
+
+    // Check data type
+    const DataType data_type          = weights->data_type();
+    bool           is_data_type_valid = is_data_type_float(data_type) || is_data_type_quantized_asymmetric(data_type);
+
+    // Check weights size
+    const unsigned int width_idx         = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const unsigned int height_idx        = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    bool               weights_supported = (weights->dimension(width_idx) == 3) && (weights->dimension(height_idx) == 3);
+
+    // Check for supported strides
+    const auto &strides           = conv_info.stride();
+    bool        supported_strides = (strides.first == strides.second) && ((strides.first == 1) || (strides.first == 2));
+
+    // Check for supported padding
+    const auto    pad_top           = conv_info.pad_top();
+    const auto    pad_right         = conv_info.pad_right();
+    const auto    pad_bottom        = conv_info.pad_bottom();
+    const auto    pad_left          = conv_info.pad_left();
+    PadStrideInfo same_pad          = calculate_same_pad(in_shape, TensorShape(3U, 3U), conv_info);
+    bool          is_same_padding   = (pad_top == same_pad.pad_top()) && (pad_right == same_pad.pad_right()) && (pad_bottom == same_pad.pad_bottom()) && (pad_left == same_pad.pad_left());
+    bool          is_valid_padding  = (pad_top == 0) && (pad_right == 0) && (pad_bottom == 0) && (pad_left == 0);
+    bool          supported_padding = is_same_padding || is_valid_padding;
+
+    return is_data_type_valid && weights_supported && supported_strides && supported_padding && (depth_multiplier == 1);
+}
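
is_optimized_supported() therefore admits exactly two padding schemes: VALID (all four pads zero) and the SAME padding that calculate_same_pad() would produce for the 3x3 kernel. For stride 1, SAME padding of a 3x3 kernel is simply 1 on every side; a small sketch of the check under that assumption:

    #include <cstdio>

    struct Pads
    {
        int top, right, bottom, left;
    };

    // Sketch of the padding test above, specialised to a 3x3 kernel at
    // stride 1, where SAME padding is 1 on every side (the general case
    // delegates to calculate_same_pad()).
    bool padding_supported_3x3_s1(const Pads &p)
    {
        const bool is_same  = (p.top == 1) && (p.right == 1) && (p.bottom == 1) && (p.left == 1);
        const bool is_valid = (p.top == 0) && (p.right == 0) && (p.bottom == 0) && (p.left == 0);
        return is_same || is_valid;
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    padding_supported_3x3_s1({ 1, 1, 1, 1 }),  // SAME  -> 1
                    padding_supported_3x3_s1({ 0, 0, 0, 0 }),  // VALID -> 1
                    padding_supported_3x3_s1({ 2, 2, 2, 2 })); // other -> 0
        return 0;
    }
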
+
+void NEDepthwiseConvolutionAssemblyDispatch::run()
+{
+    // Prepare assembly kernel
+    prepare();
+
+    _memory_group.acquire();
+
+    // Setup inputs/outputs
+    ARM_COMPUTE_ERROR_ON(_workspace.buffer() == nullptr);
+    _dwc_assembly_kernel->set_working_space(static_cast<void *>(_workspace.buffer()));
+
+    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
+    const int   input_element_size = _input->info()->element_size();
+    const int   input_batch_stride = _input->info()->strides_in_bytes()[3] / input_element_size;
+    const int   input_row_stride   = _input->info()->strides_in_bytes().z() / input_element_size;
+    const int   input_col_stride   = _input->info()->strides_in_bytes().y() / input_element_size;
+    const void *input_ptr          = _input->buffer() + _input->info()->offset_first_element_in_bytes();
+    _dwc_assembly_kernel->set_input(input_ptr, input_batch_stride, input_row_stride, input_col_stride);
+
+    ARM_COMPUTE_ERROR_ON(_output->buffer() == nullptr);
+    const int output_element_size = _output->info()->element_size();
+    const int output_batch_stride = _output->info()->strides_in_bytes()[3] / output_element_size;
+    const int output_row_stride   = _output->info()->strides_in_bytes().z() / output_element_size;
+    const int output_col_stride   = _output->info()->strides_in_bytes().y() / output_element_size;
+    void     *output_ptr          = _output->buffer() + _output->info()->offset_first_element_in_bytes();
+    _dwc_assembly_kernel->set_output(output_ptr, output_batch_stride, output_row_stride, output_col_stride);
+
+    // Schedule assembly kernel
+    NEScheduler::get().schedule(&_dwc_acl_kernel, Window::DimX);
+
+    _memory_group.release();
+}
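
run() hands the assembly kernel element strides rather than ACL's byte strides, hence the divisions by element_size above. A minimal illustration of that conversion for a dense NHWC float tensor (hypothetical shape):

    #include <cstdio>

    // Byte-stride -> element-stride conversion as done in run(), for a dense
    // NHWC float tensor: dim x = channels, y = columns, z = rows, 3 = batch.
    int main()
    {
        const int C = 64, W = 224, H = 224;
        const int element_size = 4; // sizeof(float)

        const int col_stride_bytes   = C * element_size;         // y stride
        const int row_stride_bytes   = C * W * element_size;     // z stride
        const int batch_stride_bytes = C * W * H * element_size; // batch stride

        std::printf("cols=%d rows=%d batch=%d (elements)\n",
                    col_stride_bytes / element_size,
                    row_stride_bytes / element_size,
                    batch_stride_bytes / element_size);
        return 0;
    }
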
+
+void NEDepthwiseConvolutionAssemblyDispatch::prepare()
+{
+    if(!_is_prepared)
+    {
+        _packed_weights.allocator()->allocate();
+        ARM_COMPUTE_ERROR_ON(_packed_weights.buffer() == nullptr);
+
+        // Pack weights and bias
+        const int weights_element_size = _weights->info()->element_size();
+        const int weights_row_stride   = _weights->info()->strides_in_bytes().z() / weights_element_size;
+        const int weights_col_stride   = _weights->info()->strides_in_bytes().y() / weights_element_size;
+        _dwc_assembly_kernel->pack_params(_packed_weights.buffer(),
+                                          _weights->buffer() + _weights->info()->offset_first_element_in_bytes(),
+                                          weights_row_stride,
+                                          weights_col_stride,
+                                          (_bias != nullptr) ? _bias->buffer() : nullptr);
+        _dwc_assembly_kernel->set_packed_params_buffer(_packed_weights.buffer());
+
+        _weights->mark_as_unused();
+        if(_bias != nullptr)
+        {
+            _bias->mark_as_unused();
+        }
+        _is_prepared = true;
+    }
+}
+} // namespace arm_compute