Remove Computer Vision NEON support

Resolves COMPMID-4151

Change-Id: I46f541efe8c4087f27794d2e158b6c1547d459ba
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5160
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
diff --git a/src/core/NEON/kernels/NEConvolutionKernel.cpp b/src/core/NEON/kernels/NEConvolutionKernel.cpp
deleted file mode 100644
index 075de41..0000000
--- a/src/core/NEON/kernels/NEConvolutionKernel.cpp
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEConvolutionKernel.h"
-
-#include "arm_compute/core/Coordinates.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <algorithm>
-#include <arm_neon.h>
-#include <array>
-#include <cstdint>
-#include <cstring>
-#include <tuple>
-
-namespace arm_compute
-{
-namespace
-{
-const uint16x8_t max_int16 = vdupq_n_u16(INT16_MAX);
-
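-// Each store_results overload narrows two accumulator vectors to the requested
-// output type with saturation: e.g. an s32 lane of 70000 written as s16 becomes
-// INT16_MAX via vqmovn_s32, and a negative lane written as u8 becomes 0 via
-// vqmovun_s32.
-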
-inline void store_results(const int32x4_t &out, const int32x4_t &out2, int16_t *output)
-{
-    const int16x8_t s16results = vcombine_s16(vqmovn_s32(out),
-                                              vqmovn_s32(out2));
-    vst1q_s16(output, s16results);
-}
-
-inline void store_results(const int32x4_t &out, const int32x4_t &out2, uint8_t *output)
-{
-    const uint8x8_t u8results = vqmovn_u16(vcombine_u16(vqmovun_s32(out),
-                                                        vqmovun_s32(out2)));
-    vst1_u8(output, u8results);
-}
-
-inline void store_results(const uint32x4_t &out, const uint32x4_t &out2, int16_t *output)
-{
-    const uint16x8_t u16results = vcombine_u16(vqmovn_u32(out), vqmovn_u32(out2));
-    const int16x8_t  s16results = vreinterpretq_s16_u16(vminq_u16(u16results, max_int16));
-    vst1q_s16(output, s16results);
-}
-
-inline void store_results(const uint32x4_t &out, const uint32x4_t &out2, uint8_t *output)
-{
-    const uint8x8_t u8results = vqmovn_u16(vcombine_u16(vqmovn_u32(out),
-                                                        vqmovn_u32(out2)));
-    vst1_u8(output, u8results);
-}
-
-inline void store_results(const int16x8_t &out, const int16x8_t &out2, int16_t *output)
-{
-    vst1q_s16(output, out);
-    vst1q_s16(output + 8, out2);
-}
-
-inline void store_results(const int16x8_t &out, const int16x8_t &out2, uint8_t *output)
-{
-    const uint8x16_t u8results = vcombine_u8(vqmovun_s16(out),
-                                             vqmovun_s16(out2));
-    vst1q_u8(output, u8results);
-}
-
-inline void store_results(const uint16x8_t &out, const uint16x8_t &out2, uint8_t *output)
-{
-    const uint8x16_t u8results = vcombine_u8(vqmovn_u16(out),
-                                             vqmovn_u16(out2));
-    vst1q_u8(output, u8results);
-}
-
-inline void store_results(const uint16x8_t &out, const uint16x8_t &out2, int16_t *output)
-{
-    vst1q_s16(output, vreinterpretq_s16_u16(vminq_u16(out, max_int16)));
-    vst1q_s16(output + 8, vreinterpretq_s16_u16(vminq_u16(out2, max_int16)));
-}
-
-inline void convolve_row3x1_unrolled(int32x4_t &out, int32x4_t &out2, const uint8x16_t &row_data, const int16x4_t &mat0, const int16x4_t &mat1, const int16x4_t &mat2)
-{
-    // Convert to s16 and split into blocks of 4 values:
-    const int16x8_t s16_tmp0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(row_data)));
-    const int16x8_t s16_tmp1 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(row_data)));
-
-    const int16x4x3_t row =
-    {
-        {
-            vget_low_s16(s16_tmp0),
-            vget_high_s16(s16_tmp0),
-            vget_low_s16(s16_tmp1)
-        }
-    };
-
-    // Calculate row left value for pixels [0,3]
-    out = vmlal_s16(out, row.val[0], mat0);
-    // Calculate row middle value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 1), mat1);
-    // Calculate row right value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 2), mat2);
-
-    // Calculate row left value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[1], mat0);
-    // Calculate row middle value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 1), mat1);
-    // Calculate row right value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 2), mat2);
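-
-    // Net effect: for promoted row pixels p[i], out lane i (i in [0,3]) and
-    // out2 lane i-4 (i in [4,7]) each accumulate
-    // p[i]*mat0 + p[i+1]*mat1 + p[i+2]*mat2; vext_s16 provides the row views
-    // shifted by one and two pixels.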
-}
-
-inline void convolve_row3x1(int32x4_t &out, int32x4_t &out2, const uint8x16_t &row_data, const int16_t *convolution)
-{
-    const int16x4_t mat0 = vld1_dup_s16(convolution);
-    const int16x4_t mat1 = vld1_dup_s16(convolution + 1);
-    const int16x4_t mat2 = vld1_dup_s16(convolution + 2);
-
-    convolve_row3x1_unrolled(out, out2, row_data, mat0, mat1, mat2);
-}
-
-inline void convolve_row5x1(int32x4_t &out, int32x4_t &out2, const uint8x16_t &row_data, const int16_t *convolution)
-{
-    const int16x4_t mat0 = vld1_dup_s16(convolution);
-    const int16x4_t mat1 = vld1_dup_s16(convolution + 1);
-    const int16x4_t mat2 = vld1_dup_s16(convolution + 2);
-    const int16x4_t mat3 = vld1_dup_s16(convolution + 3);
-    const int16x4_t mat4 = vld1_dup_s16(convolution + 4);
-
-    // Convert to s16 and split into blocks of 4 values:
-    const int16x8_t s16_tmp0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(row_data)));
-    const int16x8_t s16_tmp1 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(row_data)));
-
-    const int16x4x3_t row =
-    {
-        {
-            vget_low_s16(s16_tmp0),
-            vget_high_s16(s16_tmp0),
-            vget_low_s16(s16_tmp1)
-        }
-    };
-
-    // Calculate row left 2 value for pixels [0,3]
-    out = vmlal_s16(out, row.val[0], mat0);
-    // Calculate row left 1 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 1), mat1);
-    // Calculate row middle value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 2), mat2);
-    // Calculate row right +1 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 3), mat3);
-    // Calculate row right +2 value for pixels [0,3]
-    out = vmlal_s16(out, row.val[1], mat4);
-
-    // Calculate row left 2 value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[1], mat0);
-    // Calculate row left 1 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 1), mat1);
-    // Calculate row middle value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 2), mat2);
-    // Calculate row right +1 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 3), mat3);
-    // Calculate row right +2 value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[2], mat4);
-}
-
-inline void convolve_row7x1(int32x4_t &out, int32x4_t &out2, const uint8x16_t &row_data, const int16_t *convolution)
-{
-    const int16x4_t mat0 = vld1_dup_s16(convolution);
-    const int16x4_t mat1 = vld1_dup_s16(convolution + 1);
-    const int16x4_t mat2 = vld1_dup_s16(convolution + 2);
-    const int16x4_t mat3 = vld1_dup_s16(convolution + 3);
-    const int16x4_t mat4 = vld1_dup_s16(convolution + 4);
-    const int16x4_t mat5 = vld1_dup_s16(convolution + 5);
-    const int16x4_t mat6 = vld1_dup_s16(convolution + 6);
-
-    // Convert to s16 and split into blocks of 4 values:
-    const int16x8_t s16_tmp0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(row_data)));
-    const int16x8_t s16_tmp1 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(row_data)));
-
-    const int16x4x4_t row =
-    {
-        {
-            vget_low_s16(s16_tmp0),
-            vget_high_s16(s16_tmp0),
-            vget_low_s16(s16_tmp1),
-            vget_high_s16(s16_tmp1)
-        }
-    };
-
-    // Calculate row left 3 value for pixels [0,3]
-    out = vmlal_s16(out, row.val[0], mat0);
-    // Calculate row left 2 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 1), mat1);
-    // Calculate row left 1 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 2), mat2);
-    // Calculate row middle value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 3), mat3);
-    // Calculate row right +1 value for pixels [0,3]
-    out = vmlal_s16(out, row.val[1], mat4);
-    // Calculate row right +2 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[1], row.val[2], 1), mat5);
-    // Calculate row right +3 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[1], row.val[2], 2), mat6);
-
-    // Calculate row left 3 value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[1], mat0);
-    // Calculate row left 2 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 1), mat1);
-    // Calculate row left 1 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 2), mat2);
-    // Calculate row middle value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 3), mat3);
-    // Calculate row right +1 value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[2], mat4);
-    // Calculate row right +2 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[2], row.val[3], 1), mat5);
-    // Calculate row right +3 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[2], row.val[3], 2), mat6);
-}
-
-inline void convolve_row9x1(int32x4_t &out, int32x4_t &out2, const uint8x16_t &row_data, const int16_t *convolution)
-{
-    const int16x4_t mat0 = vld1_dup_s16(convolution);
-    const int16x4_t mat1 = vld1_dup_s16(convolution + 1);
-    const int16x4_t mat2 = vld1_dup_s16(convolution + 2);
-    const int16x4_t mat3 = vld1_dup_s16(convolution + 3);
-    const int16x4_t mat4 = vld1_dup_s16(convolution + 4);
-    const int16x4_t mat5 = vld1_dup_s16(convolution + 5);
-    const int16x4_t mat6 = vld1_dup_s16(convolution + 6);
-    const int16x4_t mat7 = vld1_dup_s16(convolution + 7);
-    const int16x4_t mat8 = vld1_dup_s16(convolution + 8);
-
-    // Convert to s16 and split into blocks of 4 values:
-    const int16x8_t s16_tmp0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(row_data)));
-    const int16x8_t s16_tmp1 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(row_data)));
-
-    const int16x4x4_t row =
-    {
-        {
-            vget_low_s16(s16_tmp0),
-            vget_high_s16(s16_tmp0),
-            vget_low_s16(s16_tmp1),
-            vget_high_s16(s16_tmp1)
-        }
-    };
-
-    // Calculate row left 4 value for pixels [0,3]
-    out = vmlal_s16(out, row.val[0], mat0);
-    // Calculate row left 3 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 1), mat1);
-    // Calculate row left 2 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 2), mat2);
-    // Calculate row left 1 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[0], row.val[1], 3), mat3);
-    // Calculate row middle value for pixels [0,3]
-    out = vmlal_s16(out, row.val[1], mat4);
-    // Calculate row right +1 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[1], row.val[2], 1), mat5);
-    // Calculate row right +2 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[1], row.val[2], 2), mat6);
-    // Calculate row right +3 value for pixels [0,3]
-    out = vmlal_s16(out, vext_s16(row.val[1], row.val[2], 3), mat7);
-    // Calculate row right +4 value for pixels [0,3]
-    out = vmlal_s16(out, row.val[2], mat8);
-
-    // Calculate row left 4 value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[1], mat0);
-    // Calculate row left 3 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 1), mat1);
-    // Calculate row left 2 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 2), mat2);
-    // Calculate row left 1 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[1], row.val[2], 3), mat3);
-    // Calculate row middle value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[2], mat4);
-    // Calculate row right +1 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[2], row.val[3], 1), mat5);
-    // Calculate row right +2 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[2], row.val[3], 2), mat6);
-    // Calculate row right +3 value for pixels [4,7]
-    out2 = vmlal_s16(out2, vext_s16(row.val[2], row.val[3], 3), mat7);
-    // Calculate row right +4 value for pixels [4,7]
-    out2 = vmlal_s16(out2, row.val[3], mat8);
-}
-} // namespace
-
-/****************************************************************************************\
- *                                    Square Convolution                                *
-\****************************************************************************************/
-
-template <unsigned int matrix_size>
-NEConvolutionKernel<matrix_size>::NEConvolutionKernel()
-    : INESimpleKernel(), _scale(0), _convolution{ {} }
-{
-}
-
-template <unsigned int matrix_size>
-BorderSize             NEConvolutionKernel<matrix_size>::border_size() const
-{
-    return BorderSize{ matrix_size / 2 };
-}
-
-template <unsigned int matrix_size>
-void NEConvolutionKernel<matrix_size>::configure(const ITensor *input, ITensor *output, const int16_t *conv, uint32_t scale, bool border_undefined)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, conv);
-
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
-
-    _input  = input;
-    _output = output;
-
-    std::copy_n(conv, _convolution.size(), _convolution.begin());
-
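-    // A scale of 0 selects automatic normalisation: the result is divided by
-    // (typically) the sum of the coefficients -- 9 for a 3x3 box filter of
-    // ones -- which keeps the output within the input's dynamic range.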
-    if(scale == 0)
-    {
-        _scale = calculate_matrix_scale(_convolution.data(), matrix_size);
-    }
-    else
-    {
-        _scale = scale;
-    }
-
-    // Configure kernel window
-    constexpr unsigned int num_elems_processed_per_iteration = 8;
-    constexpr unsigned int num_elems_read_per_iteration      = 16;
-    constexpr unsigned int num_elems_written_per_iteration   = 8;
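-    // 16 input bytes are read per step although only 8 results are produced:
-    // producing 8 outputs needs 8 + (matrix_size - 1) input pixels, and a
-    // single 16-byte vld1q load covers that for every supported matrix size.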
-
-    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
-
-    update_window_and_padding(win,
-                              AccessWindowRectangle(input->info(), -border_size().left, -border_size().top, num_elems_read_per_iteration, matrix_size),
-                              output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());
-
-    INEKernel::configure(win);
-}
-
-template <>
-template <typename OutputType>
-void NEConvolutionKernel<3>::convolution(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
-
-    Iterator input(_input, win);
-    Iterator output(_output, win);
-
-    // Load the matrix's coefficients into Neon registers:
-    const int16x4_t   mat00     = vld1_dup_s16(_convolution.data());
-    const int16x4_t   mat01     = vld1_dup_s16(_convolution.data() + 1);
-    const int16x4_t   mat02     = vld1_dup_s16(_convolution.data() + 2);
-    const int16x4_t   mat10     = vld1_dup_s16(_convolution.data() + 3);
-    const int16x4_t   mat11     = vld1_dup_s16(_convolution.data() + 4);
-    const int16x4_t   mat12     = vld1_dup_s16(_convolution.data() + 5);
-    const int16x4_t   mat20     = vld1_dup_s16(_convolution.data() + 6);
-    const int16x4_t   mat21     = vld1_dup_s16(_convolution.data() + 7);
-    const int16x4_t   mat22     = vld1_dup_s16(_convolution.data() + 8);
-    const float32x4_t scale_val = vdupq_n_f32(1.0f / _scale);
-
-    const unsigned char *input_top_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-1, -1));
-    const unsigned char *input_mid_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-1, 0));
-    const unsigned char *input_low_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-1, 1));
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int32x4_t out  = vdupq_n_s32(0);
-        int32x4_t out2 = vdupq_n_s32(0);
-
-        // Load 16 bytes from the top row:
-        const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
-        convolve_row3x1_unrolled(out, out2, top_data, mat00, mat01, mat02);
-
-        // Load 16 bytes from the middle row:
-        const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
-        convolve_row3x1_unrolled(out, out2, mid_data, mat10, mat11, mat12);
-
-        // Load 16 bytes from the low row:
-        const uint8x16_t low_data = vld1q_u8(input_low_ptr + input.offset());
-        convolve_row3x1_unrolled(out, out2, low_data, mat20, mat21, mat22);
-
-        // Apply scale
-        if(_scale != 1)
-        {
-            // Convert to F32, scale and convert back to S32
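-            // (the conversion back to S32 truncates toward zero)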
-            out  = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out), scale_val));
-            out2 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out2), scale_val));
-        }
-
-        // Clamp and store as U8 or S16:
-        store_results(out, out2, reinterpret_cast<OutputType *>(output.ptr()));
-    },
-    input, output);
-}
-
-template <>
-template <typename OutputType>
-void NEConvolutionKernel<5>::convolution(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
-
-    Iterator input(_input, win);
-    Iterator output(_output, win);
-
-    const float32x4_t scale_val = vdupq_n_f32(1.0f / _scale);
-
-    const unsigned char *input_top2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, -2));
-    const unsigned char *input_top1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, -1));
-    const unsigned char *input_mid_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, 0));
-    const unsigned char *input_low1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, 1));
-    const unsigned char *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, 2));
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int32x4_t out  = vdupq_n_s32(0);
-        int32x4_t out2 = vdupq_n_s32(0);
-
-        // Load 16 bytes from the top2 row:
-        const uint8x16_t data_t2 = vld1q_u8(input_top2_ptr + input.offset());
-        convolve_row5x1(out, out2, data_t2, _convolution.data());
-
-        // Load 16 bytes from the top1 row:
-        const uint8x16_t data_t1 = vld1q_u8(input_top1_ptr + input.offset());
-        convolve_row5x1(out, out2, data_t1, _convolution.data() + 5);
-
-        // Load 16 bytes from the middle row:
-        const uint8x16_t data_m = vld1q_u8(input_mid_ptr + input.offset());
-        convolve_row5x1(out, out2, data_m, _convolution.data() + 10);
-
-        // Load 16 bytes from the low1 row:
-        const uint8x16_t data_b1 = vld1q_u8(input_low1_ptr + input.offset());
-        convolve_row5x1(out, out2, data_b1, _convolution.data() + 15);
-
-        // Load 16 bytes from the low2 row:
-        const uint8x16_t data_b2 = vld1q_u8(input_low2_ptr + input.offset());
-        convolve_row5x1(out, out2, data_b2, _convolution.data() + 20);
-
-        // Apply scale
-        if(_scale != 1)
-        {
-            // Convert to F32, scale and convert back to S32
-            out  = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out), scale_val));
-            out2 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out2), scale_val));
-        }
-
-        // Clamp and store as U8 or S16:
-        store_results(out, out2, reinterpret_cast<OutputType *>(output.ptr()));
-    },
-    input, output);
-}
-
-template <>
-template <typename OutputType>
-void NEConvolutionKernel<7>::convolution(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
-
-    Iterator input(_input, win);
-    Iterator output(_output, win);
-
-    const float32x4_t scale_val = vdupq_n_f32(1.0f / _scale);
-
-    const unsigned char *input_top3_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, -3));
-    const unsigned char *input_top2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, -2));
-    const unsigned char *input_top1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, -1));
-    const unsigned char *input_mid_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, 0));
-    const unsigned char *input_low1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, 1));
-    const unsigned char *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, 2));
-    const unsigned char *input_low3_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, 3));
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int32x4_t out  = vdupq_n_s32(0);
-        int32x4_t out2 = vdupq_n_s32(0);
-
-        // Load 16 bytes from the top3 row:
-        const uint8x16_t data_t3 = vld1q_u8(input_top3_ptr + input.offset());
-        convolve_row7x1(out, out2, data_t3, _convolution.data());
-
-        // Load 16 bytes from the top2 row:
-        const uint8x16_t data_t2 = vld1q_u8(input_top2_ptr + input.offset());
-        convolve_row7x1(out, out2, data_t2, _convolution.data() + 7);
-
-        // Load 16 bytes from the top1 row:
-        const uint8x16_t data_t1 = vld1q_u8(input_top1_ptr + input.offset());
-        convolve_row7x1(out, out2, data_t1, _convolution.data() + 14);
-
-        // Load 16 bytes from the middle row:
-        const uint8x16_t data_m = vld1q_u8(input_mid_ptr + input.offset());
-        convolve_row7x1(out, out2, data_m, _convolution.data() + 21);
-
-        // Load 16 bytes from the low1 row:
-        const uint8x16_t data_b1 = vld1q_u8(input_low1_ptr + input.offset());
-        convolve_row7x1(out, out2, data_b1, _convolution.data() + 28);
-
-        // Load 16 bytes from the low2 row:
-        const uint8x16_t data_b2 = vld1q_u8(input_low2_ptr + input.offset());
-        convolve_row7x1(out, out2, data_b2, _convolution.data() + 35);
-
-        // Load 16 bytes from the low3 row:
-        const uint8x16_t data_b3 = vld1q_u8(input_low3_ptr + input.offset());
-        convolve_row7x1(out, out2, data_b3, _convolution.data() + 42);
-
-        // Apply scale
-        if(_scale != 1)
-        {
-            // Convert to F32, scale and convert back to S32
-            out  = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out), scale_val));
-            out2 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out2), scale_val));
-        }
-
-        // Clamp and store as U8 or S16:
-        store_results(out, out2, reinterpret_cast<OutputType *>(output.ptr()));
-    },
-    input, output);
-}
-
-template <>
-template <typename OutputType>
-void NEConvolutionKernel<9>::convolution(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
-
-    Iterator input(_input, win);
-    Iterator output(_output, win);
-
-    const float32x4_t scale_val = vdupq_n_f32(1.0f / _scale);
-
-    const unsigned char *input_top4_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, -4));
-    const unsigned char *input_top3_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, -3));
-    const unsigned char *input_top2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, -2));
-    const unsigned char *input_top1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, -1));
-    const unsigned char *input_mid_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 0));
-    const unsigned char *input_low1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 1));
-    const unsigned char *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 2));
-    const unsigned char *input_low3_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 3));
-    const unsigned char *input_low4_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 4));
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int32x4_t out  = vdupq_n_s32(0);
-        int32x4_t out2 = vdupq_n_s32(0);
-
-        // Load 16 bytes from the top4 row:
-        const uint8x16_t data_t4 = vld1q_u8(input_top4_ptr + input.offset());
-        convolve_row9x1(out, out2, data_t4, _convolution.data());
-
-        // Load 16 bytes from the top3 row:
-        const uint8x16_t data_t3 = vld1q_u8(input_top3_ptr + input.offset());
-        convolve_row9x1(out, out2, data_t3, _convolution.data() + 9);
-
-        // Load 16 bytes from the top2 row:
-        const uint8x16_t data_t2 = vld1q_u8(input_top2_ptr + input.offset());
-        convolve_row9x1(out, out2, data_t2, _convolution.data() + 18);
-
-        // Load 16 bytes from the top1 row:
-        const uint8x16_t data_t1 = vld1q_u8(input_top1_ptr + input.offset());
-        convolve_row9x1(out, out2, data_t1, _convolution.data() + 27);
-
-        // Load 16 bytes from the middle row:
-        const uint8x16_t data_m = vld1q_u8(input_mid_ptr + input.offset());
-        convolve_row9x1(out, out2, data_m, _convolution.data() + 36);
-
-        // Load 16 bytes from the low1 row:
-        const uint8x16_t data_b1 = vld1q_u8(input_low1_ptr + input.offset());
-        convolve_row9x1(out, out2, data_b1, _convolution.data() + 45);
-
-        // Load 16 bytes from the low2 row:
-        const uint8x16_t data_b2 = vld1q_u8(input_low2_ptr + input.offset());
-        convolve_row9x1(out, out2, data_b2, _convolution.data() + 54);
-
-        // Load 16 bytes from the low3 row:
-        const uint8x16_t data_b3 = vld1q_u8(input_low3_ptr + input.offset());
-        convolve_row9x1(out, out2, data_b3, _convolution.data() + 63);
-
-        // Load 16 bytes from the low4 row:
-        const uint8x16_t data_b4 = vld1q_u8(input_low4_ptr + input.offset());
-        convolve_row9x1(out, out2, data_b4, _convolution.data() + 72);
-
-        // Apply scale
-        if(_scale != 1)
-        {
-            // Convert to F32, scale and convert back to S32
-            out  = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out), scale_val));
-            out2 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out2), scale_val));
-        }
-
-        // Clamp and store as U8 or S16:
-        store_results(out, out2, reinterpret_cast<OutputType *>(output.ptr()));
-    },
-    input, output);
-}
-
-template <unsigned int matrix_size>
-void NEConvolutionKernel<matrix_size>::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
-    switch(_output->info()->data_type())
-    {
-        case DataType::U8:
-            convolution<uint8_t>(window);
-            break;
-        case DataType::S16:
-            convolution<int16_t>(window);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Not supported Data type!");
-            break;
-    }
-}
-
-template class arm_compute::NEConvolutionKernel<3>;
-template class arm_compute::NEConvolutionKernel<5>;
-template class arm_compute::NEConvolutionKernel<7>;
-template class arm_compute::NEConvolutionKernel<9>;
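-
-// Explicit instantiations for the 3x3, 5x5, 7x7 and 9x9 square convolutions.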
-
-/****************************************************************************************\
- *                              Separable Square Convolution                            *
-\****************************************************************************************/
-
-template <unsigned int matrix_size>
-NESeparableConvolutionHorKernel<matrix_size>::NESeparableConvolutionHorKernel()
-    : _conv_row{ { 0 } }, _border_size(0)
-{
-}
-
-template <unsigned int matrix_size>
-BorderSize             NESeparableConvolutionHorKernel<matrix_size>::border_size() const
-{
-    return _border_size;
-}
-
-template <unsigned int matrix_size>
-void NESeparableConvolutionHorKernel<matrix_size>::configure(const ITensor *input, ITensor *output, const int16_t *conv_row, bool border_undefined)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, conv_row);
-
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16, DataType::S16, DataType::S32);
-
-    _input  = input;
-    _output = output;
-    std::copy_n(conv_row, _conv_row.size(), _conv_row.begin());
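-    // The horizontal pass always needs matrix_size / 2 pixels to the left and
-    // right; the top/bottom border only matters to the later vertical pass, so
-    // it is dropped when the caller accepts an undefined border.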
-    _border_size = BorderSize(border_undefined ? 0 : matrix_size / 2, matrix_size / 2);
-
-    // Configure kernel window
-    constexpr unsigned int num_elems_processed_per_iteration = 8;
-    constexpr unsigned int num_elems_read_per_iteration      = 16;
-    constexpr unsigned int num_elems_written_per_iteration   = 8;
-
-    Window                 win = calculate_max_window_horizontal(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
-
-    update_window_and_padding(win,
-                              AccessWindowHorizontal(input->info(), -border_size().left, num_elems_read_per_iteration),
-                              output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());
-
-    INEKernel::configure(win);
-}
-
-template <unsigned int matrix_size>
-void NESeparableConvolutionHorKernel<matrix_size>::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-    switch(_output->info()->data_type())
-    {
-        case DataType::U16:
-            convolve<uint16_t>(window);
-            break;
-        case DataType::S16:
-            convolve<int16_t>(window);
-            break;
-        case DataType::S32:
-            convolve<int32_t>(window);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Unsupported intermediate data type!");
-            break;
-    }
-}
-
-template <>
-template <>
-inline void NESeparableConvolutionHorKernel<5>::convolve<uint16_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -2);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const uint16x8x2_t data_u16 =
-        {
-            {
-                vmovl_u8(vget_low_u8(data)),
-                vmovl_u8(vget_high_u8(data))
-            }
-        };
-
-        uint16x8_t out = vmulq_n_u16(data_u16.val[0], _conv_row[0]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 1), _conv_row[1]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 2), _conv_row[2]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 3), _conv_row[3]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 4), _conv_row[4]);
-
-        vst1q_u16(reinterpret_cast<uint16_t *>(output.ptr()), out);
-    },
-    input, output);
-}
-
-template <>
-template <>
-inline void NESeparableConvolutionHorKernel<5>::convolve<int16_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -2);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const int16x8x2_t data_s16 =
-        {
-            {
-                vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data))),
-                vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(data)))
-            }
-        };
-
-        int16x8_t out = vmulq_n_s16(data_s16.val[0], _conv_row[0]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 1), _conv_row[1]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 2), _conv_row[2]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 3), _conv_row[3]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 4), _conv_row[4]);
-
-        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()), out);
-    },
-    input, output);
-}
-
-template <>
-template <>
-void NESeparableConvolutionHorKernel<5>::convolve<int32_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -2);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const int16x8x2_t data_s16 =
-        {
-            {
-                vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data))),
-                vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(data)))
-            }
-        };
-
-        const int16x8_t data_s16_l1 = vextq_s16(data_s16.val[0], data_s16.val[1], 1);
-        const int16x8_t data_s16_m  = vextq_s16(data_s16.val[0], data_s16.val[1], 2);
-        const int16x8_t data_s16_r1 = vextq_s16(data_s16.val[0], data_s16.val[1], 3);
-        const int16x8_t data_s16_r2 = vextq_s16(data_s16.val[0], data_s16.val[1], 4);
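-
-        // val[0] and the four shifted views above are the five taps of the
-        // 5x1 window for each of the 8 output lanes.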
-
-        int32x4_t out_low = vmull_n_s16(vget_low_s16(data_s16.val[0]), _conv_row[0]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_l1), _conv_row[1]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_m), _conv_row[2]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r1), _conv_row[3]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r2), _conv_row[4]);
-
-        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), out_low);
-
-        int32x4_t out_high = vmull_n_s16(vget_high_s16(data_s16.val[0]), _conv_row[0]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_l1), _conv_row[1]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_m), _conv_row[2]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r1), _conv_row[3]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r2), _conv_row[4]);
-
-        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, out_high);
-    },
-    input, output);
-}
-
-template <>
-template <>
-inline void NESeparableConvolutionHorKernel<7>::convolve<uint16_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -3);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const uint16x8x2_t data_u16 =
-        {
-            {
-                vmovl_u8(vget_low_u8(data)),
-                vmovl_u8(vget_high_u8(data))
-            }
-        };
-
-        uint16x8_t out = vmulq_n_u16(data_u16.val[0], _conv_row[0]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 1), _conv_row[1]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 2), _conv_row[2]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 3), _conv_row[3]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 4), _conv_row[4]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 5), _conv_row[5]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 6), _conv_row[6]);
-
-        vst1q_u16(reinterpret_cast<uint16_t *>(output.ptr()), out);
-    },
-    input, output);
-}
-
-template <>
-template <>
-inline void NESeparableConvolutionHorKernel<7>::convolve<int16_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -3);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const int16x8x2_t data_s16 =
-        {
-            {
-                vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data))),
-                vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(data)))
-            }
-        };
-
-        int16x8_t out = vmulq_n_s16(data_s16.val[0], _conv_row[0]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 1), _conv_row[1]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 2), _conv_row[2]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 3), _conv_row[3]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 4), _conv_row[4]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 5), _conv_row[5]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 6), _conv_row[6]);
-
-        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()), out);
-    },
-    input, output);
-}
-
-template <>
-template <>
-void NESeparableConvolutionHorKernel<7>::convolve<int32_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -3);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const int16x8x2_t data_s16 =
-        {
-            {
-                vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data))),
-                vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(data)))
-            }
-        };
-
-        const int16x8_t data_s16_l2 = vextq_s16(data_s16.val[0], data_s16.val[1], 1);
-        const int16x8_t data_s16_l1 = vextq_s16(data_s16.val[0], data_s16.val[1], 2);
-        const int16x8_t data_s16_m  = vextq_s16(data_s16.val[0], data_s16.val[1], 3);
-        const int16x8_t data_s16_r1 = vextq_s16(data_s16.val[0], data_s16.val[1], 4);
-        const int16x8_t data_s16_r2 = vextq_s16(data_s16.val[0], data_s16.val[1], 5);
-        const int16x8_t data_s16_r3 = vextq_s16(data_s16.val[0], data_s16.val[1], 6);
-
-        int32x4_t out_low = vmull_n_s16(vget_low_s16(data_s16.val[0]), _conv_row[0]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_l2), _conv_row[1]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_l1), _conv_row[2]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_m), _conv_row[3]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r1), _conv_row[4]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r2), _conv_row[5]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r3), _conv_row[6]);
-
-        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), out_low);
-
-        int32x4_t out_high = vmull_n_s16(vget_high_s16(data_s16.val[0]), _conv_row[0]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_l2), _conv_row[1]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_l1), _conv_row[2]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_m), _conv_row[3]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r1), _conv_row[4]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r2), _conv_row[5]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r3), _conv_row[6]);
-
-        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, out_high);
-    },
-    input, output);
-}
-
-template <>
-template <>
-inline void NESeparableConvolutionHorKernel<9>::convolve<uint16_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -4);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const uint16x8x2_t data_u16 =
-        {
-            {
-                vmovl_u8(vget_low_u8(data)),
-                vmovl_u8(vget_high_u8(data))
-            }
-        };
-
-        uint16x8_t out = vmulq_n_u16(data_u16.val[0], _conv_row[0]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 1), _conv_row[1]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 2), _conv_row[2]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 3), _conv_row[3]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 4), _conv_row[4]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 5), _conv_row[5]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 6), _conv_row[6]);
-        out            = vmlaq_n_u16(out, vextq_u16(data_u16.val[0], data_u16.val[1], 7), _conv_row[7]);
-        out            = vmlaq_n_u16(out, data_u16.val[1], _conv_row[8]);
-
-        vst1q_u16(reinterpret_cast<uint16_t *>(output.ptr()), out);
-    },
-    input, output);
-}
-
-template <>
-template <>
-inline void NESeparableConvolutionHorKernel<9>::convolve<int16_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -4);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const int16x8x2_t data_s16 =
-        {
-            {
-                vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data))),
-                vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(data)))
-            }
-        };
-
-        int16x8_t out = vmulq_n_s16(data_s16.val[0], _conv_row[0]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 1), _conv_row[1]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 2), _conv_row[2]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 3), _conv_row[3]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 4), _conv_row[4]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 5), _conv_row[5]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 6), _conv_row[6]);
-        out           = vmlaq_n_s16(out, vextq_s16(data_s16.val[0], data_s16.val[1], 7), _conv_row[7]);
-        out           = vmlaq_n_s16(out, data_s16.val[1], _conv_row[8]);
-
-        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()), out);
-    },
-    input, output);
-}
-
-template <>
-template <>
-void NESeparableConvolutionHorKernel<9>::convolve<int32_t>(const Window &window)
-{
-    Window win_in(window);
-    win_in.shift(Window::DimX, -4);
-
-    Iterator input(_input, win_in);
-    Iterator output(_output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const uint8x16_t data = vld1q_u8(input.ptr());
-
-        const int16x8x2_t data_s16 =
-        {
-            {
-                vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data))),
-                vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(data)))
-            }
-        };
-
-        const int16x8_t data_s16_l3 = vextq_s16(data_s16.val[0], data_s16.val[1], 1);
-        const int16x8_t data_s16_l2 = vextq_s16(data_s16.val[0], data_s16.val[1], 2);
-        const int16x8_t data_s16_l1 = vextq_s16(data_s16.val[0], data_s16.val[1], 3);
-        const int16x8_t data_s16_m  = vextq_s16(data_s16.val[0], data_s16.val[1], 4);
-        const int16x8_t data_s16_r1 = vextq_s16(data_s16.val[0], data_s16.val[1], 5);
-        const int16x8_t data_s16_r2 = vextq_s16(data_s16.val[0], data_s16.val[1], 6);
-        const int16x8_t data_s16_r3 = vextq_s16(data_s16.val[0], data_s16.val[1], 7);
-
-        int32x4_t out_low = vmull_n_s16(vget_low_s16(data_s16.val[0]), _conv_row[0]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_l3), _conv_row[1]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_l2), _conv_row[2]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_l1), _conv_row[3]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_m), _conv_row[4]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r1), _conv_row[5]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r2), _conv_row[6]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16_r3), _conv_row[7]);
-        out_low           = vmlal_n_s16(out_low, vget_low_s16(data_s16.val[1]), _conv_row[8]);
-
-        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), out_low);
-
-        int32x4_t out_high = vmull_n_s16(vget_high_s16(data_s16.val[0]), _conv_row[0]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_l3), _conv_row[1]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_l2), _conv_row[2]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_l1), _conv_row[3]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_m), _conv_row[4]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r1), _conv_row[5]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r2), _conv_row[6]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16_r3), _conv_row[7]);
-        out_high           = vmlal_n_s16(out_high, vget_high_s16(data_s16.val[1]), _conv_row[8]);
-
-        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, out_high);
-    },
-    input, output);
-}
-
-template class arm_compute::NESeparableConvolutionHorKernel<5>;
-template class arm_compute::NESeparableConvolutionHorKernel<7>;
-template class arm_compute::NESeparableConvolutionHorKernel<9>;
-
-template <unsigned int matrix_size>
-NESeparableConvolutionVertKernel<matrix_size>::NESeparableConvolutionVertKernel()
-    : _conv_col{ { 0 } }, _scale(0)
-{
-}
-
-template <unsigned int matrix_size>
-BorderSize             NESeparableConvolutionVertKernel<matrix_size>::border_size() const
-{
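-    // The vertical pass only reads rows above/below the current one, so the
-    // border is matrix_size / 2 at the top/bottom and zero at the sides.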
-    return BorderSize{ matrix_size / 2, 0 };
-}
-
-template <unsigned int matrix_size>
-void NESeparableConvolutionVertKernel<matrix_size>::configure(const ITensor *input, ITensor *output, const int16_t *conv_col, uint32_t scale, bool border_undefined)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, conv_col);
-
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16, DataType::S16, DataType::S32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
-    ARM_COMPUTE_ERROR_ON(scale == 0);
-
-    _input  = input;
-    _output = output;
-    std::copy_n(conv_col, _conv_col.size(), _conv_col.begin());
-    _scale = scale;
-
-    // Configure kernel window
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
-    constexpr unsigned int num_elems_read_per_iteration      = 16;
-    constexpr unsigned int num_elems_written_per_iteration   = 16;
-
-    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
-
-    update_window_and_padding(win,
-                              AccessWindowRectangle(input->info(), 0, -border_size().top, num_elems_read_per_iteration, matrix_size),
-                              output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());
-
-    INEKernel::configure(win);
-}
-
-template <unsigned int matrix_size>
-void NESeparableConvolutionVertKernel<matrix_size>::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
-    switch(_input->info()->data_type())
-    {
-        case DataType::U16:
-            switch(_output->info()->data_type())
-            {
-                case DataType::U8:
-                    convolution_u16<uint8_t>(window);
-                    break;
-                case DataType::S16:
-                    convolution_u16<int16_t>(window);
-                    break;
-                default:
-                    ARM_COMPUTE_ERROR("Not supported");
-            }
-            break;
-        case DataType::S16:
-            switch(_output->info()->data_type())
-            {
-                case DataType::U8:
-                    convolution_s16<uint8_t>(window);
-                    break;
-                case DataType::S16:
-                    convolution_s16<int16_t>(window);
-                    break;
-                default:
-                    ARM_COMPUTE_ERROR("Not supported");
-            }
-            break;
-        case DataType::S32:
-            switch(_output->info()->data_type())
-            {
-                case DataType::U8:
-                    convolution_s32<uint8_t>(window);
-                    break;
-                case DataType::S16:
-                    convolution_s32<int16_t>(window);
-                    break;
-                default:
-                    ARM_COMPUTE_ERROR("Not supported");
-            }
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Unsupported intermediate data type!");
-            break;
-    }
-}
-
-template <unsigned int matrix_size>
-template <typename OutputType>
-void NESeparableConvolutionVertKernel<matrix_size>::convolution_u16(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-
-    Window win_in(win);
-    win_in.set_dimension_step(Window::DimX, 8);
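-
-    // The window advances 16 elements per iteration but u16 data is handled
-    // 8 lanes at a time, so the input window steps by 8 and is incremented
-    // once more mid-iteration for the second half.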
-
-    Iterator in(_input, win_in);
-    Iterator out(_output, win);
-
-    std::array<unsigned char *, matrix_size> input_ptrs{ {} };
-    const float32x4_t oneoverscale = vdupq_n_f32(1.0f / _scale);
-    const int         k_half       = matrix_size / 2;
-
-    // Set row pointers
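-    // For a 5-tap column these point at rows y = -2..+2 of element x = 0;
-    // in.offset() is added at each window position to reach the current x.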
-    for(int i = -k_half; i <= k_half; ++i)
-    {
-        input_ptrs[k_half + i] = _input->ptr_to_element(Coordinates(0, i));
-    }
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        uint16x8_t out0 = vdupq_n_u16(0);
-        uint16x8_t out1 = vdupq_n_u16(0);
-
-        // First half
-        for(unsigned int r = 0; r < matrix_size; ++r)
-        {
-            const uint16x8_t data = vld1q_u16(reinterpret_cast<const uint16_t *>(input_ptrs[r] + in.offset()));
-            out0                  = vmlaq_n_u16(out0, data, _conv_col[r]);
-        }
-
-        in.increment(Window::DimX);
-
-        // Second half
-        for(unsigned int r = 0; r < matrix_size; ++r)
-        {
-            const uint16x8_t data = vld1q_u16(reinterpret_cast<const uint16_t *>(input_ptrs[r] + in.offset()));
-            out1                  = vmlaq_n_u16(out1, data, _conv_col[r]);
-        }
-
-        // Scale the result if needed
-        if(_scale != 1)
-        {
-            float32x4_t out0_f32_high = vcvtq_f32_u32(vmovl_u16(vget_high_u16(out0)));
-            float32x4_t out0_f32_low  = vcvtq_f32_u32(vmovl_u16(vget_low_u16(out0)));
-            out0_f32_high             = vmulq_f32(out0_f32_high, oneoverscale);
-            out0_f32_low              = vmulq_f32(out0_f32_low, oneoverscale);
-            store_results(vcvtq_u32_f32(out0_f32_low), vcvtq_u32_f32(out0_f32_high), reinterpret_cast<OutputType *>(out.ptr()));
-
-            float32x4_t out1_f32_high = vcvtq_f32_u32(vmovl_u16(vget_high_u16(out1)));
-            float32x4_t out1_f32_low  = vcvtq_f32_u32(vmovl_u16(vget_low_u16(out1)));
-            out1_f32_high             = vmulq_f32(out1_f32_high, oneoverscale);
-            out1_f32_low              = vmulq_f32(out1_f32_low, oneoverscale);
-            store_results(vcvtq_u32_f32(out1_f32_low), vcvtq_u32_f32(out1_f32_high), reinterpret_cast<OutputType *>(out.ptr()) + 8);
-        }
-        else
-        {
-            store_results(out0, out1, reinterpret_cast<OutputType *>(out.ptr()));
-        }
-    },
-    in, out);
-}
-
-template <unsigned int matrix_size>
-template <typename OutputType>
-void NESeparableConvolutionVertKernel<matrix_size>::convolution_s16(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-
-    Window win_in(win);
-    win_in.set_dimension_step(Window::DimX, 8);
-
-    Iterator in(_input, win_in);
-    Iterator out(_output, win);
-
-    std::array<unsigned char *, matrix_size> input_ptrs{ {} };
-    const float32x4_t oneoverscale = vdupq_n_f32(1.0f / _scale);
-    const int         k_half       = matrix_size / 2;
-
-    // Set row pointers
-    for(int i = -k_half; i <= k_half; ++i)
-    {
-        input_ptrs[k_half + i] = _input->ptr_to_element(Coordinates(0, i));
-    }
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int16x8_t out0 = vdupq_n_s16(0);
-        int16x8_t out1 = vdupq_n_s16(0);
-
-        // First half
-        for(unsigned int r = 0; r < matrix_size; ++r)
-        {
-            const int16x8_t data = vld1q_s16(reinterpret_cast<const int16_t *>(input_ptrs[r] + in.offset()));
-            out0                 = vmlaq_n_s16(out0, data, _conv_col[r]);
-        }
-
-        in.increment(Window::DimX);
-
-        // Second half
-        for(unsigned int r = 0; r < matrix_size; ++r)
-        {
-            const int16x8_t data = vld1q_s16(reinterpret_cast<const int16_t *>(input_ptrs[r] + in.offset()));
-            out1                 = vmlaq_n_s16(out1, data, _conv_col[r]);
-        }
-
-        // Scale the result if needed
-        if(_scale != 1)
-        {
-            float32x4_t out0_f32_high = vcvtq_f32_s32(vmovl_s16(vget_high_s16(out0)));
-            float32x4_t out0_f32_low  = vcvtq_f32_s32(vmovl_s16(vget_low_s16(out0)));
-            out0_f32_high             = vmulq_f32(out0_f32_high, oneoverscale);
-            out0_f32_low              = vmulq_f32(out0_f32_low, oneoverscale);
-            store_results(vcvtq_s32_f32(out0_f32_low), vcvtq_s32_f32(out0_f32_high), reinterpret_cast<OutputType *>(out.ptr()));
-
-            float32x4_t out1_f32_high = vcvtq_f32_s32(vmovl_s16(vget_high_s16(out1)));
-            float32x4_t out1_f32_low  = vcvtq_f32_s32(vmovl_s16(vget_low_s16(out1)));
-            out1_f32_high             = vmulq_f32(out1_f32_high, oneoverscale);
-            out1_f32_low              = vmulq_f32(out1_f32_low, oneoverscale);
-            store_results(vcvtq_s32_f32(out1_f32_low), vcvtq_s32_f32(out1_f32_high), reinterpret_cast<OutputType *>(out.ptr()) + 8);
-        }
-        else
-        {
-            store_results(out0, out1, reinterpret_cast<OutputType *>(out.ptr()));
-        }
-    },
-    in, out);
-}
-
-template <unsigned int matrix_size>
-template <typename OutputType>
-void NESeparableConvolutionVertKernel<matrix_size>::convolution_s32(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-
-    Window win_in(win);
-    win_in.set_dimension_step(Window::DimX, 8);
-
-    Iterator in(_input, win_in);
-    Iterator out(_output, win);
-
-    std::array<unsigned char *, matrix_size> input_ptrs{ {} };
-    const float32x4_t oneoverscale = vdupq_n_f32(1.0f / _scale);
-    const int         k_half       = matrix_size / 2;
-
-    // Set row pointers
-    for(int i = -k_half; i <= k_half; ++i)
-    {
-        input_ptrs[k_half + i] = _input->ptr_to_element(Coordinates(0, i));
-    }
-
-    const int32x4_t zero = vdupq_n_s32(0);
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int32x4x2_t out0 = { { zero, zero } };
-        int32x4x2_t out1 = { { zero, zero } };
-
-        // First half
-        for(unsigned int r = 0; r < matrix_size; ++r)
-        {
-            const int32x4x2_t data = vld2q_s32(reinterpret_cast<const int32_t *>(input_ptrs[r] + in.offset()));
-            out0.val[0]            = vmlaq_n_s32(out0.val[0], data.val[0], _conv_col[r]);
-            out0.val[1]            = vmlaq_n_s32(out0.val[1], data.val[1], _conv_col[r]);
-        }
-
-        in.increment(Window::DimX);
-
-        // Second half
-        for(unsigned int r = 0; r < matrix_size; ++r)
-        {
-            const int32x4x2_t data = vld2q_s32(reinterpret_cast<const int32_t *>(input_ptrs[r] + in.offset()));
-            out1.val[0]            = vmlaq_n_s32(out1.val[0], data.val[0], _conv_col[r]);
-            out1.val[1]            = vmlaq_n_s32(out1.val[1], data.val[1], _conv_col[r]);
-        }
-
-        // Scale the result if needed
-        if(_scale != 1)
-        {
-            float32x4_t out0_f32_odd  = vcvtq_f32_s32(out0.val[0]);
-            float32x4_t out0_f32_even = vcvtq_f32_s32(out0.val[1]);
-            out0_f32_odd              = vmulq_f32(out0_f32_odd, oneoverscale);
-            out0_f32_even             = vmulq_f32(out0_f32_even, oneoverscale);
-            out0.val[0]               = vcvtq_s32_f32(out0_f32_odd);
-            out0.val[1]               = vcvtq_s32_f32(out0_f32_even);
-
-            float32x4_t out1_f32_odd  = vcvtq_f32_s32(out1.val[0]);
-            float32x4_t out1_f32_even = vcvtq_f32_s32(out1.val[1]);
-            out1_f32_odd              = vmulq_f32(out1_f32_odd, oneoverscale);
-            out1_f32_even             = vmulq_f32(out1_f32_even, oneoverscale);
-            out1.val[0]               = vcvtq_s32_f32(out1_f32_odd);
-            out1.val[1]               = vcvtq_s32_f32(out1_f32_even);
-        }
-
-        const int32x4x2_t out0_s32 = vzipq_s32(out0.val[0], out0.val[1]);
-        store_results(out0_s32.val[0], out0_s32.val[1], reinterpret_cast<OutputType *>(out.ptr()));
-
-        const int32x4x2_t out1_s32 = vzipq_s32(out1.val[0], out1.val[1]);
-        store_results(out1_s32.val[0], out1_s32.val[1], reinterpret_cast<OutputType *>(out.ptr()) + 8);
-    },
-    in, out);
-}
-
-template class arm_compute::NESeparableConvolutionVertKernel<5>;
-template class arm_compute::NESeparableConvolutionVertKernel<7>;
-template class arm_compute::NESeparableConvolutionVertKernel<9>;
-
-/****************************************************************************************\
- *                                 Rectangle Convolution                                *
-\****************************************************************************************/
-
-NEConvolutionRectangleKernel::NEConvolutionRectangleKernel()
-    : _input(nullptr), _output(nullptr), _scale(0), _convolution(), _border_size(), _func_idx(0)
-{
-}
-
-BorderSize NEConvolutionRectangleKernel::border_size() const
-{
-    return _border_size;
-}
-
-void NEConvolutionRectangleKernel::configure(const ITensor *input, ITensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, conv);
-
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
-    ARM_COMPUTE_ERROR_ON(width != 3 && width != 5 && width != 7 && width != 9);
-    ARM_COMPUTE_ERROR_ON(height != 3 && height != 5 && height != 7 && height != 9);
-    ARM_COMPUTE_ERROR_ON(0 == scale);
-
-    _input       = input;
-    _output      = output;
-    _scale       = scale;
-    _border_size = BorderSize(height / 2, width / 2);
-
-    // Setup the convolution matrix
-    const uint32_t nr_elements = width * height;
-    _convolution.resize(nr_elements);
-    std::copy_n(conv, nr_elements, _convolution.begin());
-
-    // Set function index to help choose appropriate function in run()
-    _func_idx = get_index(height) * 4 + get_index(width);
-    ARM_COMPUTE_ERROR_ON(_func_idx >= (_nr_supported_sizes * _nr_supported_sizes));
-
-    // Configure kernel window
-    constexpr unsigned int num_elems_processed_per_iteration = 8;
-    constexpr unsigned int num_elems_read_per_iteration      = 16;
-    constexpr unsigned int num_elems_written_per_iteration   = 8;
-
-    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, _border_size);
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
-
-    update_window_and_padding(win,
-                              AccessWindowRectangle(input->info(), -_border_size.left, -_border_size.top, num_elems_read_per_iteration, height),
-                              output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, _border_size);
-
-    INEKernel::configure(win);
-}
-
-void NEConvolutionRectangleKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
-    using ConvolutionRectangleFunction = void (NEConvolutionRectangleKernel::*)(const Window & window);
-
-    // uint8_t function table
-    static const std::array<ConvolutionRectangleFunction, 16> func_table_u8 =
-    {
-        {
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 3, 3>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 3, 5>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 3, 7>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 3, 9>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 5, 3>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 5, 5>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 5, 7>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 5, 9>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 7, 3>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 7, 5>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 7, 7>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 7, 9>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 9, 3>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 9, 5>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 9, 7>,
-            &NEConvolutionRectangleKernel::convolution<uint8_t, 9, 9>
-        }
-    };
-    // int16_t function table
-    static const std::array<ConvolutionRectangleFunction, 16> func_table_s16 =
-    {
-        {
-            &NEConvolutionRectangleKernel::convolution<int16_t, 3, 3>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 3, 5>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 3, 7>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 3, 9>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 5, 3>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 5, 5>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 5, 7>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 5, 9>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 7, 3>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 7, 5>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 7, 7>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 7, 9>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 9, 3>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 9, 5>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 9, 7>,
-            &NEConvolutionRectangleKernel::convolution<int16_t, 9, 9>
-        }
-    };
-
-    // Run appropriate function
-    switch(_output->info()->data_type())
-    {
-        case DataType::U8:
-            ARM_COMPUTE_ERROR_ON(_func_idx >= func_table_u8.size());
-            (this->*func_table_u8[_func_idx])(window);
-            break;
-        case DataType::S16:
-            ARM_COMPUTE_ERROR_ON(_func_idx >= func_table_s16.size());
-            (this->*func_table_s16[_func_idx])(window);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Not supported");
-    }
-}
-
-unsigned int NEConvolutionRectangleKernel::get_index(uint32_t val)
-{
-    switch(val)
-    {
-        case 3:
-            return 0;
-        case 5:
-            return 1;
-        case 7:
-            return 2;
-        case 9:
-            return 3;
-        default:
-            ARM_COMPUTE_ERROR("Not supported dimension size");
-            return 0;
-    }
-}
-
-template <typename OutputType, unsigned int rows, unsigned int cols>
-void NEConvolutionRectangleKernel::convolution(const Window &win)
-{
-    static_assert(sizeof(OutputType) == sizeof(uint8_t) || sizeof(OutputType) == sizeof(int16_t), "The output buffer can only be u8 or s16");
-    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
-
-    Iterator input(_input, win);
-    Iterator output(_output, win);
-
-    std::array<unsigned char *, rows> input_ptrs{ {} };
-    const int16_t    *conv       = _convolution.data();
-    const float32x4_t scale_val  = vdupq_n_f32(1.0f / _scale);
-    const int         k_row_half = rows / 2;
-    const int         k_col_half = cols / 2;
-
-    // Set row pointers
-    for(int i = -k_row_half; i <= k_row_half; ++i)
-    {
-        input_ptrs[k_row_half + i] = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-k_col_half, i));
-    }
-
-    execute_window_loop(win, [&](const Coordinates &)
-    {
-        int32x4_t out  = vdupq_n_s32(0);
-        int32x4_t out2 = vdupq_n_s32(0);
-
-        // Perform appropriate convolution
-        for(unsigned int r = 0; r < rows; ++r)
-        {
-            const uint8x16_t data = vld1q_u8(input_ptrs[r] + input.offset());
-            if(3 == cols)
-            {
-                convolve_row3x1(out, out2, data, conv + r * cols);
-            }
-            else if(5 == cols)
-            {
-                convolve_row5x1(out, out2, data, conv + r * cols);
-            }
-            else if(7 == cols)
-            {
-                convolve_row7x1(out, out2, data, conv + r * cols);
-            }
-            else if(9 == cols)
-            {
-                convolve_row9x1(out, out2, data, conv + r * cols);
-            }
-            else
-            {
-                ARM_COMPUTE_ERROR("Unsupported number of columns");
-            }
-        }
-
-        // Apply scale
-        if(_scale != 1)
-        {
-            // Convert to F32, scale and convert back to S32
-            out  = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out), scale_val));
-            out2 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(out2), scale_val));
-        }
-
-        // Clamp and store as U8 or S16:
-        store_results(out, out2, reinterpret_cast<OutputType *>(output.ptr()));
-    },
-    input, output);
-}
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEConvolutionKernel.h b/src/core/NEON/kernels/NEConvolutionKernel.h
deleted file mode 100644
index b8bf1d1..0000000
--- a/src/core/NEON/kernels/NEConvolutionKernel.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NECONVOLUTIONKERNEL_H
-#define ARM_COMPUTE_NECONVOLUTIONKERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-#include "src/core/NEON/INESimpleKernel.h"
-
-#include <array>
-#include <cstdint>
-#include <vector>
-
-namespace arm_compute
-{
-class ITensor;
-
-/****************************************************************************************\
- *                                    Square Convolution                                *
-\****************************************************************************************/
-
-/** Interface for the kernel to run an arbitrary size convolution on a tensor. (Currently supports 3x3, 5x5, 7x7 and 9x9).
- * The client can supply a convolution matrix \f$ C_{m,n} \f$.
- * @f{eqnarray}{
- *  k_0 &=& \frac{m}{2}  \\
- *  l_0 &=& \frac{n}{2}  \\
- *  sum &=& \sum_{k=0,l=0}^{k=m-1,l=n-1} input(x+k-k_0, y+l-l_0) C_{k,l}
- *  @f}
- *
- * @note The above equation for this function is similar to the default OpenCV Filter2D function,
- *       which actually computes a correlation and not a convolution.
- *       For a true convolution, the convolution matrix should be flipped both horizontally and vertically.
- */
-template <unsigned int matrix_size>
-class NEConvolutionKernel : public INESimpleKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEConvolutionKernel";
-    }
-    /** Default constructor */
-    NEConvolutionKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NEConvolutionKernel(const NEConvolutionKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NEConvolutionKernel &operator=(const NEConvolutionKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEConvolutionKernel(NEConvolutionKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEConvolutionKernel &operator=(NEConvolutionKernel &&) = default;
-    /** Default destructor */
-    ~NEConvolutionKernel() = default;
-    /** Initialise the kernel's input, output and border mode.
-     *
-     * @param[in]  input            Source tensor. Data type supported: U8.
-     * @param[out] output           Destination tensor. Data types supported: U8, S16.
-     * @param[in]  conv             Convolution matrix to apply to the input tensor.
-     * @param[in]  scale            Scale of the convolution matrix. If 0 is passed, the scale is set to the sum of the matrix coefficients, or to 1 if that sum is 0.
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
-     */
-    void configure(const ITensor *input, ITensor *output, const int16_t *conv, uint32_t scale, bool border_undefined);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
-
-private:
-    template <typename OutputType>
-    void convolution(const Window &win);
-
-protected:
-    uint32_t _scale;                                             /**< scale of the convolution */
-    std::array<int16_t, matrix_size * matrix_size> _convolution; /**< convolution matrix */
-};
-
-/** Interface for the kernel which applies a 3x3 convolution to a tensor.*/
-using NEConvolution3x3Kernel = NEConvolutionKernel<3>;
-/** Interface for the kernel which applies a 5x5 convolution to a tensor.*/
-using NEConvolution5x5Kernel = NEConvolutionKernel<5>;
-/** Interface for the kernel which applies a 7x7 convolution to a tensor.*/
-using NEConvolution7x7Kernel = NEConvolutionKernel<7>;
-/** Interface for the kernel which applies a 9x9 convolution to a tensor.*/
-using NEConvolution9x9Kernel = NEConvolutionKernel<9>;
-
-/****************************************************************************************\
- *                              Separable Square Convolution                            *
-\****************************************************************************************/
-
-/** Kernel for the Horizontal pass of a Separable Convolution */
-template <unsigned int matrix_size>
-class NESeparableConvolutionHorKernel : public INESimpleKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NESeparableConvolutionHorKernel";
-    }
-    /** Default constructor */
-    NESeparableConvolutionHorKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NESeparableConvolutionHorKernel(const NESeparableConvolutionHorKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NESeparableConvolutionHorKernel &operator=(const NESeparableConvolutionHorKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NESeparableConvolutionHorKernel(NESeparableConvolutionHorKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NESeparableConvolutionHorKernel &operator=(NESeparableConvolutionHorKernel &&) = default;
-    /** Default destructor */
-    ~NESeparableConvolutionHorKernel() = default;
-
-    /** Initialise the kernel's input, output and border mode.
-     *
-     * @param[in]  input            Source tensor. Data type supported: U8.
-     * @param[out] output           Destination tensor. Data types supported: U16, S16, S32.
-     * @param[in]  conv_row         Convolution matrix to apply to the input tensor.
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
-     */
-    void configure(const ITensor *input, ITensor *output, const int16_t *conv_row, bool border_undefined);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
-
-private:
-    /** Apply the object's convolution to the given window of the input tensor.
-     *
-     * @param[in] window Window to apply the convolution on.
-     */
-    template <typename OutputType>
-    void convolve(const Window &window);
-
-    std::array<int16_t, matrix_size> _conv_row; /**< Convolution coefficients */
-    BorderSize _border_size;                    /**< Border size */
-};
-
-/** Interface for the kernel which applies a 5x1 horizontal convolution to a tensor.*/
-using NESeparableConvolution5x5HorKernel = NESeparableConvolutionHorKernel<5>;
-/** Interface for the kernel which applies a 7x1 horizontal convolution to a tensor.*/
-using NESeparableConvolution7x7HorKernel = NESeparableConvolutionHorKernel<7>;
-/** Interface for the kernel which applies a 9x1 horizontal convolution to a tensor.*/
-using NESeparableConvolution9x9HorKernel = NESeparableConvolutionHorKernel<9>;
-
-/** Kernel for the Vertical pass of a Separable Convolution */
-template <unsigned int matrix_size>
-class NESeparableConvolutionVertKernel : public INESimpleKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NESeparableConvolutionVertKernel";
-    }
-    /** Default constructor */
-    NESeparableConvolutionVertKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NESeparableConvolutionVertKernel(const NESeparableConvolutionVertKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NESeparableConvolutionVertKernel &operator=(const NESeparableConvolutionVertKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NESeparableConvolutionVertKernel(NESeparableConvolutionVertKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NESeparableConvolutionVertKernel &operator=(NESeparableConvolutionVertKernel &&) = default;
-    /** Default destructor */
-    ~NESeparableConvolutionVertKernel() = default;
-
-    /** Initialise the kernel's input, output and border mode.
-     *
-     * @param[in]  input            Source tensor. Data type supported: U16, S16, S32.
-     * @param[out] output           Destination tensor. Data types supported: U8, S16.
-     * @param[in]  conv_col         Convolution matrix to apply to the input tensor.
-     * @param[in]  scale            Scale of the convolution matrix.
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
-     */
-    void configure(const ITensor *input, ITensor *output, const int16_t *conv_col, uint32_t scale, bool border_undefined);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
-
-private:
-    /** Apply the object's convolution to the given window of the input tensor.
-     *  This function is used if the intermediate values have been stored as U16.
-     *
-     * @param[in] win Window to apply the convolution on.
-     */
-    template <typename OutputType>
-    void convolution_u16(const Window &win);
-    /** Apply the object's convolution to the given window of the input tensor.
-     *  This function is used if the intermediate values have been stored as S16.
-     *
-     * @param[in] win Window to apply the convolution on.
-     */
-    template <typename OutputType>
-    void convolution_s16(const Window &win);
-    /** Apply the object's convolution to the given window of the input tensor.
-     *  This function is used if the intermediate values have been stored as S32.
-     *
-     * @param[in] win Window to apply the convolution on.
-     */
-    template <typename OutputType>
-    void convolution_s32(const Window &win);
-
-    std::array<int16_t, matrix_size> _conv_col; /**< Convolution coefficients */
-    uint32_t _scale;                            /**< Convolution's scale */
-};
-
-/** Interface for the kernel which applies a 1x5 vertical convolution to a tensor.*/
-using NESeparableConvolution5x5VertKernel = NESeparableConvolutionVertKernel<5>;
-/** Interface for the kernel which applies a 1x7 vertical convolution to a tensor.*/
-using NESeparableConvolution7x7VertKernel = NESeparableConvolutionVertKernel<7>;
-/** Interface for the kernel which applies a 1x9 vertical convolution to a tensor.*/
-using NESeparableConvolution9x9VertKernel = NESeparableConvolutionVertKernel<9>;
-
-/****************************************************************************************\
- *                                 Rectangle Convolution                                *
-\****************************************************************************************/
-
-/** Kernel for running a convolution on a rectangular matrix.
- *
- * @note Supports any combination of 3, 5, 7 and 9 for the matrix width and height.
- */
-class NEConvolutionRectangleKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEConvolutionRectangleKernel";
-    }
-    /** Default constructor */
-    NEConvolutionRectangleKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEConvolutionRectangleKernel(const NEConvolutionRectangleKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEConvolutionRectangleKernel &operator=(const NEConvolutionRectangleKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEConvolutionRectangleKernel(NEConvolutionRectangleKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEConvolutionRectangleKernel &operator=(NEConvolutionRectangleKernel &&) = default;
-    /** Default destructor */
-    ~NEConvolutionRectangleKernel() = default;
-    /** Initialise the kernel's input, output and border mode.
-     *
-     * @param[in]  input            Source tensor. Data type supported: U8.
-     * @param[out] output           Destination tensor. Data types supported: U8, S16.
-     * @param[in]  conv             Convolution matrix to apply to the input tensor.
-     * @param[in]  width            Width of convolution matrix (Number of columns)
-     * @param[in]  height           Height of convolution matrix (Number of rows)
-     * @param[in]  scale            Scale of the convolution matrix. If 0 is passed, the scale is set to the sum of the matrix coefficients, or to 1 if that sum is 0.
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
-     */
-    void configure(const ITensor *input, ITensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
-
-private:
-    unsigned int get_index(uint32_t val);
-    /** Apply the object's convolution to the given window of the input tensor.
-     *
-     * @param[in] win Window to apply the convolution on.
-     */
-    template <typename OutputType, unsigned int rows, unsigned int cols>
-    void convolution(const Window &win);
-
-protected:
-    const ITensor            *_input;       /**< Input tensor */
-    ITensor                  *_output;      /**< Output tensor */
-    uint32_t                  _scale;       /**< Scale of the convolution */
-    std::vector<int16_t>      _convolution; /**< Convolution matrix */
-    BorderSize                _border_size; /**< Calculated border width */
-    uint32_t                  _func_idx;    /**< Index used to specify convolution function to be used */
-    static const unsigned int _nr_supported_sizes{ 4 }; /**< Number of supported matrix sizes */
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_NECONVOLUTIONKERNEL_H */
diff --git a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp b/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp
deleted file mode 100644
index 9f5dfcd..0000000
--- a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <arm_neon.h>
-#include <cstddef>
-
-using namespace arm_compute;
-
-namespace arm_compute
-{
-class Coordinates;
-} // namespace arm_compute
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-namespace fp16
-{
-inline void mask_top(const float16x8_t &vc, const float16x8_t &in0, const float16x8_t &in1, uint16x8_t &mask)
-{
-    // vc >= nc.val[0], vc >= nc.val[1], vc >= nc.val[2]
-    mask = vandq_u16(mask, vcgeq_f16(vc, in0));
-    mask = vandq_u16(mask, vcgeq_f16(vc, vextq_f16(in0, in1, 1)));
-    mask = vandq_u16(mask, vcgeq_f16(vc, vextq_f16(in0, in1, 2)));
-}
-
-inline void mask_middle(const float16x8_t &vc, const float16x8_t &in0, const float16x8_t &in1, uint16x8_t &mask)
-{
-    // vc >= nc.val[0], vc > nc.val[2]
-    mask = vandq_u16(mask, vcgeq_f16(vc, in0));
-    mask = vandq_u16(mask, vcgtq_f16(vc, vextq_f16(in0, in1, 2)));
-}
-
-inline void mask_bottom(const float16x8_t &vc, const float16x8_t &in0, const float16x8_t &in1, uint16x8_t &mask)
-{
-    // vc > nc.val[0], vc > nc.val[1], vc > nc.val[2]
-    mask = vandq_u16(mask, vcgtq_f16(vc, in0));
-    mask = vandq_u16(mask, vcgtq_f16(vc, vextq_f16(in0, in1, 1)));
-    mask = vandq_u16(mask, vcgtq_f16(vc, vextq_f16(in0, in1, 2)));
-}
-
-inline void non_maxima_suppression3x3_F32_F32(const void *__restrict in_ptr, void *__restrict out_ptr, const uint32_t in_stride)
-{
-    auto       in  = static_cast<const float *__restrict>(in_ptr) - 1;
-    const auto out = static_cast<float *__restrict>(out_ptr);
-
-    // Get centre scores
-    const float16x8x2_t vc =
-    {
-        vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 1)), vcvt_f16_f32(vld1q_f32(in + 5))),
-        vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 9)), vcvt_f16_f32(vld1q_f32(in + 13)))
-    };
-
-    // Neighboring pixels
-    in -= in_stride;
-
-    static const float16x4_t  zero_f16x4 = vdup_n_f16(0);
-    static const uint16x8_t   zero_u16   = vdupq_n_u16(0);
-    static const uint16x8_t   true_mask  = vceqq_u16(zero_u16, zero_u16);
-    static const uint16x8x2_t true_mask_x2 =
-    {
-        true_mask,
-        true_mask
-    };
-
-    uint16x8x2_t mask = true_mask_x2;
-
-    // Top row
-    const float16x8_t tmp_top0 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in)), vcvt_f16_f32(vld1q_f32(in + 4)));
-    const float16x8_t tmp_top1 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 8)), vcvt_f16_f32(vld1q_f32(in + 12)));
-    const float16x8_t tmp_top2 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 16)), zero_f16x4);
-
-    // vc >= nc.val[0], vc >= nc.val[1], vc >= nc.val[2]
-    mask_top(vc.val[0], tmp_top0, tmp_top1, mask.val[0]);
-    mask_top(vc.val[1], tmp_top1, tmp_top2, mask.val[1]);
-
-    in += in_stride;
-
-    // Middle row
-    const float16x8_t tmp_mid0 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in)), vcvt_f16_f32(vld1q_f32(in + 4)));
-    const float16x8_t tmp_mid1 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 8)), vcvt_f16_f32(vld1q_f32(in + 12)));
-    const float16x8_t tmp_mid2 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 16)), zero_f16x4);
-
-    // vc >= nc.val[0], vc > nc.val[2]
-    mask_middle(vc.val[0], tmp_mid0, tmp_mid1, mask.val[0]);
-    mask_middle(vc.val[1], tmp_mid1, tmp_mid2, mask.val[1]);
-
-    in += in_stride;
-
-    // Bottom row
-    const float16x8_t tmp_bot0 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in)), vcvt_f16_f32(vld1q_f32(in + 4)));
-    const float16x8_t tmp_bot1 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 8)), vcvt_f16_f32(vld1q_f32(in + 12)));
-    const float16x8_t tmp_bot2 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 16)), zero_f16x4);
-
-    // vc > nc.val[0], vc > nc.val[1], vc > nc.val[2]
-    mask_bottom(vc.val[0], tmp_bot0, tmp_bot1, mask.val[0]);
-    mask_bottom(vc.val[1], tmp_bot1, tmp_bot2, mask.val[1]);
-
-    // Store
-    static const float16x8_t zero_f16x8 = vdupq_n_f16(0);
-
-    const float16x8_t suppressed0 = vbslq_f16(mask.val[0], vc.val[0], zero_f16x8);
-    vst1q_f32(out + 0, vcvt_f32_f16(vget_low_f16(suppressed0)));
-    vst1q_f32(out + 4, vcvt_f32_f16(vget_high_f16(suppressed0)));
-
-    const float16x8_t suppressed1 = vbslq_f16(mask.val[1], vc.val[1], zero_f16x8);
-    vst1q_f32(out + 8, vcvt_f32_f16(vget_low_f16(suppressed1)));
-    vst1q_f32(out + 12, vcvt_f32_f16(vget_high_f16(suppressed1)));
-}
-
-inline void non_maxima_suppression3x3_U8_U8(const void *__restrict in_ptr, void *__restrict out_ptr, const uint32_t in_stride)
-{
-    auto       in  = static_cast<const uint8_t *__restrict>(in_ptr) - 1;
-    const auto out = static_cast<uint8_t *__restrict>(out_ptr);
-
-    // Get centre scores
-    const uint8x16_t vc = vld1q_u8(in + 1);
-
-    // Neighboring pixels
-    in -= in_stride;
-
-    // Top row
-    const uint8x16_t l_nc_0 = vld1q_u8(in);
-    const uint8x16_t m_nc_0 = vld1q_u8(in + 1);
-    const uint8x16_t r_nc_0 = vld1q_u8(in + 2);
-
-    // Keep center scores if ...
-    // vc >= l_nc_0, vc >= m_nc_0, vc >= r_nc_0
-    uint8x16_t mask = vcgeq_u8(vc, l_nc_0);
-    mask            = vandq_u8(mask, vcgeq_u8(vc, m_nc_0));
-    mask            = vandq_u8(mask, vcgeq_u8(vc, r_nc_0));
-
-    in += in_stride;
-
-    // Middle row
-    const uint8x16_t l_nc_1 = vld1q_u8(in);
-    const uint8x16_t r_nc_1 = vld1q_u8(in + 2);
-
-    // ... and ...
-    // vc >= l_nc_1, vc > r_nc_1
-    mask = vandq_u8(mask, vcgeq_u8(vc, l_nc_1));
-    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc_1));
-
-    in += in_stride;
-
-    // Bottom row
-    const uint8x16_t l_nc_2 = vld1q_u8(in);
-    const uint8x16_t m_nc_2 = vld1q_u8(in + 1);
-    const uint8x16_t r_nc_2 = vld1q_u8(in + 2);
-
-    // ... and ...
-    // vc > l_nc_2, vc > m_nc_2, vc > r_nc_2
-    mask = vandq_u8(mask, vcgtq_u8(vc, l_nc_2));
-    mask = vandq_u8(mask, vcgtq_u8(vc, m_nc_2));
-    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc_2));
-
-    // Store
-    static const uint8x16_t zero = vdupq_n_u8(0);
-    vst1q_u8(out, vbslq_u8(mask, vc, zero));
-}
-} // namespace fp16
-
-void NENonMaximaSuppression3x3FP16Kernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
-{
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-
-    _input  = input;
-    _output = output;
-
-    switch(input->info()->data_type())
-    {
-        case DataType::U8:
-            _func = &fp16::non_maxima_suppression3x3_U8_U8;
-            break;
-        default:
-            _func = &fp16::non_maxima_suppression3x3_F32_F32;
-            break;
-    }
-
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
-    const unsigned int     num_elems_read_per_iteration      = 16 + 2 * border_size().left + (input->info()->data_type() == DataType::U8 ? 0 : 3);
-    constexpr unsigned int num_elems_written_per_iteration   = 16;
-    constexpr unsigned int num_rows_read_per_iteration       = 3;
-
-    // Configure kernel window
-    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
-
-    update_window_and_padding(win,
-                              AccessWindowRectangle(input->info(), -border_size().left, -border_size().top, num_elems_read_per_iteration, num_rows_read_per_iteration),
-                              output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());
-
-    INEKernel::configure(win);
-}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-namespace
-{
-inline void non_maxima_suppression3x3_FLOAT_FLOAT(const void *__restrict input_ptr, void *__restrict output_ptr, const uint32_t input_stride)
-{
-    auto       input  = static_cast<const float *__restrict>(input_ptr) - 1;
-    const auto output = static_cast<float *__restrict>(output_ptr);
-
-    // Get centre scores
-    const float32x4x4_t vc =
-    {
-        {
-            vld1q_f32(input + 1),
-            vld1q_f32(input + 5),
-            vld1q_f32(input + 9),
-            vld1q_f32(input + 13)
-        }
-    };
-
-    // Neighboring pixels
-    float32x4x4_t l_nc{ {} };
-    float32x4x4_t m_nc{ {} };
-    float32x4x4_t r_nc{ {} };
-
-    input -= input_stride;
-
-    // Row0 - Low part
-    float32x4_t tmp_low   = vld1q_f32(input);
-    float32x4_t tmp_high  = vld1q_f32(input + 4);
-    float32x4_t tmp_high1 = vld1q_f32(input + 8);
-
-    l_nc.val[0] = tmp_low;
-    m_nc.val[0] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[0] = vextq_f32(tmp_low, tmp_high, 2);
-
-    tmp_low  = tmp_high;
-    tmp_high = tmp_high1;
-
-    l_nc.val[1] = tmp_low;
-    m_nc.val[1] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[1] = vextq_f32(tmp_low, tmp_high, 2);
-
-    // Row0 - High part
-    tmp_low   = tmp_high1;
-    tmp_high  = vld1q_f32(input + 12);
-    tmp_high1 = vld1q_f32(input + 16);
-
-    l_nc.val[2] = tmp_low;
-    m_nc.val[2] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[2] = vextq_f32(tmp_low, tmp_high, 2);
-
-    tmp_low  = tmp_high;
-    tmp_high = tmp_high1;
-
-    l_nc.val[3] = tmp_low;
-    m_nc.val[3] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[3] = vextq_f32(tmp_low, tmp_high, 2);
-
-    // vc >= l_nc, vc >= m_nc, vc >= r_nc
-    uint32x4x4_t mask{ {} };
-    mask.val[0] = vcgeq_f32(vc.val[0], l_nc.val[0]);
-    mask.val[0] = vandq_u32(mask.val[0], vcgeq_f32(vc.val[0], m_nc.val[0]));
-    mask.val[0] = vandq_u32(mask.val[0], vcgeq_f32(vc.val[0], r_nc.val[0]));
-    mask.val[1] = vcgeq_f32(vc.val[1], l_nc.val[1]);
-    mask.val[1] = vandq_u32(mask.val[1], vcgeq_f32(vc.val[1], m_nc.val[1]));
-    mask.val[1] = vandq_u32(mask.val[1], vcgeq_f32(vc.val[1], r_nc.val[1]));
-    mask.val[2] = vcgeq_f32(vc.val[2], l_nc.val[2]);
-    mask.val[2] = vandq_u32(mask.val[2], vcgeq_f32(vc.val[2], m_nc.val[2]));
-    mask.val[2] = vandq_u32(mask.val[2], vcgeq_f32(vc.val[2], r_nc.val[2]));
-    mask.val[3] = vcgeq_f32(vc.val[3], l_nc.val[3]);
-    mask.val[3] = vandq_u32(mask.val[3], vcgeq_f32(vc.val[3], m_nc.val[3]));
-    mask.val[3] = vandq_u32(mask.val[3], vcgeq_f32(vc.val[3], r_nc.val[3]));
-
-    input += input_stride;
-
-    // Row1 - Low part
-    tmp_low   = vld1q_f32(input);
-    tmp_high  = vld1q_f32(input + 4);
-    tmp_high1 = vld1q_f32(input + 8);
-
-    l_nc.val[0] = tmp_low;
-    r_nc.val[0] = vextq_f32(tmp_low, tmp_high, 2);
-
-    tmp_low  = tmp_high;
-    tmp_high = tmp_high1;
-
-    l_nc.val[1] = tmp_low;
-    r_nc.val[1] = vextq_f32(tmp_low, tmp_high, 2);
-
-    // Row1 - High part
-    tmp_low   = tmp_high1;
-    tmp_high  = vld1q_f32(input + 12);
-    tmp_high1 = vld1q_f32(input + 16);
-
-    l_nc.val[2] = tmp_low;
-    r_nc.val[2] = vextq_f32(tmp_low, tmp_high, 2);
-
-    tmp_low  = tmp_high;
-    tmp_high = tmp_high1;
-
-    l_nc.val[3] = tmp_low;
-    r_nc.val[3] = vextq_f32(tmp_low, tmp_high, 2);
-
-    // vc >= l_nc, vc > r_nc
-    mask.val[0] = vandq_u32(mask.val[0], vcgeq_f32(vc.val[0], l_nc.val[0]));
-    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], r_nc.val[0]));
-    mask.val[1] = vandq_u32(mask.val[1], vcgeq_f32(vc.val[1], l_nc.val[1]));
-    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], r_nc.val[1]));
-    mask.val[2] = vandq_u32(mask.val[2], vcgeq_f32(vc.val[2], l_nc.val[2]));
-    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], r_nc.val[2]));
-    mask.val[3] = vandq_u32(mask.val[3], vcgeq_f32(vc.val[3], l_nc.val[3]));
-    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], r_nc.val[3]));
-
-    input += input_stride;
-
-    // Row2 - Low part
-    tmp_low   = vld1q_f32(input);
-    tmp_high  = vld1q_f32(input + 4);
-    tmp_high1 = vld1q_f32(input + 8);
-
-    l_nc.val[0] = tmp_low;
-    m_nc.val[0] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[0] = vextq_f32(tmp_low, tmp_high, 2);
-
-    tmp_low  = tmp_high;
-    tmp_high = tmp_high1;
-
-    l_nc.val[1] = tmp_low;
-    m_nc.val[1] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[1] = vextq_f32(tmp_low, tmp_high, 2);
-
-    // Row2 - High part
-    tmp_low   = tmp_high1;
-    tmp_high  = vld1q_f32(input + 12);
-    tmp_high1 = vld1q_f32(input + 16);
-
-    l_nc.val[2] = tmp_low;
-    m_nc.val[2] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[2] = vextq_f32(tmp_low, tmp_high, 2);
-
-    tmp_low  = tmp_high;
-    tmp_high = tmp_high1;
-
-    l_nc.val[3] = tmp_low;
-    m_nc.val[3] = vextq_f32(tmp_low, tmp_high, 1);
-    r_nc.val[3] = vextq_f32(tmp_low, tmp_high, 2);
-
-    // vc > l_nc, vc > m_nc, vc > r_nc
-    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], l_nc.val[0]));
-    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], m_nc.val[0]));
-    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], r_nc.val[0]));
-    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], l_nc.val[1]));
-    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], m_nc.val[1]));
-    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], r_nc.val[1]));
-    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], l_nc.val[2]));
-    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], m_nc.val[2]));
-    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], r_nc.val[2]));
-    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], l_nc.val[3]));
-    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], m_nc.val[3]));
-    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], r_nc.val[3]));
-
-    static const float32x4_t zero = vdupq_n_f32(0.f);
-
-    // Store
-    vst1q_f32(output + 0, vbslq_f32(mask.val[0], vc.val[0], zero));
-    vst1q_f32(output + 4, vbslq_f32(mask.val[1], vc.val[1], zero));
-    vst1q_f32(output + 8, vbslq_f32(mask.val[2], vc.val[2], zero));
-    vst1q_f32(output + 12, vbslq_f32(mask.val[3], vc.val[3], zero));
-}
-
-inline void non_maxima_suppression3x3_U8_U8(const void *__restrict input_ptr, void *__restrict output_ptr, const uint32_t input_stride)
-{
-    auto       input  = static_cast<const uint8_t *__restrict>(input_ptr) - 1;
-    const auto output = static_cast<uint8_t *__restrict>(output_ptr);
-
-    // Get centre scores
-    const uint8x16_t vc = vld1q_u8(input + 1);
-
-    // Neighboring pixels
-    uint8x16_t l_nc{};
-    uint8x16_t m_nc{};
-    uint8x16_t r_nc{};
-
-    input -= input_stride;
-
-    // Row0
-    l_nc = vld1q_u8(input);
-    m_nc = vld1q_u8(input + 1);
-    r_nc = vld1q_u8(input + 2);
-
-    // vc >= l_nc, vc >= m_nc, vc >= r_nc
-    uint8x16_t mask = vcgeq_u8(vc, l_nc);
-    mask            = vandq_u8(mask, vcgeq_u8(vc, m_nc));
-    mask            = vandq_u8(mask, vcgeq_u8(vc, r_nc));
-
-    input += input_stride;
-
-    // Row1
-    l_nc = vld1q_u8(input);
-    r_nc = vld1q_u8(input + 2);
-
-    // vc >= l_nc, vc > r_nc
-    mask = vandq_u8(mask, vcgeq_u8(vc, l_nc));
-    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc));
-
-    input += input_stride;
-
-    // Row2
-    l_nc = vld1q_u8(input);
-    m_nc = vld1q_u8(input + 1);
-    r_nc = vld1q_u8(input + 2);
-
-    // vc > l_nc, vc > m_nc, vc > r_nc
-    mask = vandq_u8(mask, vcgtq_u8(vc, l_nc));
-    mask = vandq_u8(mask, vcgtq_u8(vc, m_nc));
-    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc));
-
-    static const uint8x16_t zero = vdupq_n_u8(0);
-
-    // Store
-    vst1q_u8(output, vbslq_u8(mask, vc, zero));
-}
-} // namespace
-
-NENonMaximaSuppression3x3Kernel::NENonMaximaSuppression3x3Kernel()
-    : _func(nullptr), _input(nullptr), _output(nullptr)
-{
-}
-
-BorderSize NENonMaximaSuppression3x3Kernel::border_size() const
-{
-    return BorderSize(1);
-}
-
-void NENonMaximaSuppression3x3Kernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
-{
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-
-    _input  = input;
-    _output = output;
-
-    if(input->info()->data_type() == DataType::U8)
-    {
-        _func = &non_maxima_suppression3x3_U8_U8;
-    }
-    else
-    {
-        _func = &non_maxima_suppression3x3_FLOAT_FLOAT;
-    }
-
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
-    const unsigned int     num_elems_read_per_iteration      = 16 + 2 * border_size().left + (input->info()->data_type() == DataType::U8 ? 0 : 3);
-    constexpr unsigned int num_elems_written_per_iteration   = 16;
-    constexpr unsigned int num_rows_read_per_iteration       = 3;
-
-    // Configure kernel window
-    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
-
-    update_window_and_padding(win,
-                              AccessWindowRectangle(input->info(), -border_size().left, -border_size().top, num_elems_read_per_iteration, num_rows_read_per_iteration),
-                              output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());
-
-    INEKernel::configure(win);
-}
-
-void NENonMaximaSuppression3x3Kernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-    ARM_COMPUTE_ERROR_ON(_func == nullptr);
-    Iterator input(_input, window);
-    Iterator output(_output, window);
-
-    const size_t input_stride = _input->info()->strides_in_bytes()[1] / element_size_from_data_type(_input->info()->data_type());
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        _func(input.ptr(), output.ptr(), input_stride);
-    },
-    input, output);
-}
diff --git a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h b/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h
deleted file mode 100644
index 4194dac..0000000
--- a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NENONMAXIMASUPPRESSION3x3KERNEL_H
-#define ARM_COMPUTE_NENONMAXIMASUPPRESSION3x3KERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-
-#include <cstdint>
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Interface to perform Non-Maxima suppression over a 3x3 window using Neon */
-class NENonMaximaSuppression3x3Kernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NENonMaximaSuppression3x3Kernel";
-    }
-    /** Default constructor */
-    NENonMaximaSuppression3x3Kernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NENonMaximaSuppression3x3Kernel(const NENonMaximaSuppression3x3Kernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NENonMaximaSuppression3x3Kernel &operator=(const NENonMaximaSuppression3x3Kernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NENonMaximaSuppression3x3Kernel(NENonMaximaSuppression3x3Kernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NENonMaximaSuppression3x3Kernel &operator=(NENonMaximaSuppression3x3Kernel &&) = default;
-    /** Default destructor */
-    ~NENonMaximaSuppression3x3Kernel() = default;
-
-    /** Initialise the kernel's sources, destinations and border mode.
-     *
-     * @param[in]  input            Source tensor. Data types supported: U8/F32
-     * @param[out] output           Destination tensor. Data types supported: same as @p input
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
-     */
-    void configure(const ITensor *input, ITensor *output, bool border_undefined);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
-
-protected:
-    /** Common signature for all the specialised non-maxima suppression 3x3 functions
-     *
-     * @param[in]  input_ptr    Pointer to the input tensor.
-     * @param[out] output_ptr   Pointer to the output tensor
-     * @param[in]  input_stride Stride of the input tensor
-     */
-    using NonMaxSuppr3x3Function = void(const void *__restrict input_ptr, void *__restrict output_ptr, const uint32_t input_stride);
-
-    NonMaxSuppr3x3Function *_func;   /**< Non-Maxima suppression function to use for the particular tensor types passed to configure() */
-    const ITensor          *_input;  /**< Source tensor */
-    ITensor                *_output; /**< Destination tensor */
-};
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-/** Neon kernel to perform Non-Maxima suppression 3x3 with intermediate results in FP16 if the input data type is FP32
- */
-class NENonMaximaSuppression3x3FP16Kernel : public NENonMaximaSuppression3x3Kernel
-{
-public:
-    const char *name() const override
-    {
-        return "NENonMaximaSuppression3x3FP16Kernel";
-    }
-    /** Initialise the kernel's sources, destinations and border mode.
-     *
-     * @param[in]  input            Source tensor. Data types supported: U8/F32.
-     * @param[out] output           Destination tensor. Data types supported: same as @p input
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
-     */
-    void configure(const ITensor *input, ITensor *output, bool border_undefined);
-};
-#else  /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-/** Neon kernel to perform Non-Maxima suppression 3x3 with intermediate results in FP16 if the input data type is FP32 */
-using NENonMaximaSuppression3x3FP16Kernel = NENonMaximaSuppression3x3Kernel;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_NENONMAXIMASUPPRESSION3x3KERNEL_H */
diff --git a/src/core/NEON/kernels/NERemapKernel.cpp b/src/core/NEON/kernels/NERemapKernel.cpp
new file mode 100644
index 0000000..24d0dd8
--- /dev/null
+++ b/src/core/NEON/kernels/NERemapKernel.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/NEON/kernels/NERemapKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "src/core/AccessWindowStatic.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/ScaleHelpers.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+using namespace arm_compute;
+
+namespace arm_compute
+{
+class Coordinates;
+} // namespace arm_compute
+
+namespace
+{
+inline int32x4_t offset_nearest_interpolation(const float *mapx_ptr, const float *mapy_ptr, const float32x4_t &width, const float32x4_t &height, const int32x4_t &stride)
+{
+    const float32x4_t lowerxy = vdupq_n_f32(-1.f);
+
+    float32x4_t x = vld1q_f32(mapx_ptr);
+    float32x4_t y = vld1q_f32(mapy_ptr);
+
+    // Clamp x and y coordinates
+    x = vmaxq_f32(lowerxy, vminq_f32(x, width));
+    y = vmaxq_f32(lowerxy, vminq_f32(y, height));
+
+    const int32x4_t x_s32 = vcvtq_s32_f32(x);
+    const int32x4_t y_s32 = vcvtq_s32_f32(y);
+
+    return vmlaq_s32(x_s32, y_s32, stride);
+}
+
+} // namespace
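For reference, each call to offset_nearest_interpolation() processes four coordinates at once; a scalar model of a single lane, under the same clamping rules, looks like the following (a hypothetical helper for illustration, not part of the patch):

    #include <algorithm>
    #include <cstdint>

    // Scalar model of one lane: clamp the remap coordinates into
    // [-1, width] x [-1, height] (kept in bounds by the one-pixel border),
    // truncate towards zero as vcvtq_s32_f32 does, then turn (x, y) into a
    // byte offset from the start of the input plane.
    inline int32_t offset_nearest_scalar(float x, float y, float width, float height, int32_t stride)
    {
        x = std::max(-1.f, std::min(x, width));
        y = std::max(-1.f, std::min(y, height));
        return static_cast<int32_t>(x) + static_cast<int32_t>(y) * stride;
    }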
+
+NERemapKernel::NERemapKernel()
+    : _func(nullptr), _input(nullptr), _output(nullptr), _map_x(nullptr), _map_y(nullptr)
+{
+}
+
+BorderSize NERemapKernel::border_size() const
+{
+    return BorderSize(1);
+}
+
+void NERemapKernel::configure(const ITensor *input, const ITensor *map_x, const ITensor *map_y, ITensor *output, InterpolationPolicy policy)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_x, 1, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_y, 1, DataType::F32);
+
+    _input  = input;
+    _output = output;
+    _map_x  = map_x;
+    _map_y  = map_y;
+
+    switch(policy)
+    {
+        case InterpolationPolicy::NEAREST_NEIGHBOR:
+        {
+            _func = &NERemapKernel::remap_nearest;
+            break;
+        }
+        case InterpolationPolicy::BILINEAR:
+        {
+            _func = &NERemapKernel::remap_bilinear;
+            break;
+        }
+        default:
+            ARM_COMPUTE_ERROR("Unsupported interpolation mode");
+            break;
+    }
+
+    constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
+
+    const int total_right  = ceil_to_multiple(input->info()->dimension(0), num_elems_processed_per_iteration);
+    const int access_right = total_right + (((total_right - input->info()->dimension(0)) == 0) ? border_size().right : 0);
+
+    AccessWindowStatic input_access(input->info(), -border_size().left, -border_size().top, access_right, input->info()->dimension(1) + border_size().bottom);
+
+    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+    AccessWindowHorizontal mapx_access(map_x->info(), 0, num_elems_processed_per_iteration);
+    AccessWindowHorizontal mapy_access(map_y->info(), 0, num_elems_processed_per_iteration);
+
+    update_window_and_padding(win, input_access, mapx_access, mapy_access, output_access);
+
+    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+    INEKernel::configure(win);
+}
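The right-edge access bound above is easiest to see with two worked cases (step = 16, border = 1): for an input width of 100, total_right = ceil_to_multiple(100, 16) = 112, and the 12 columns of slack already cover the right border, so access_right = 112; for a width of 96, total_right = 96 leaves no slack, so the border column must be requested explicitly and access_right = 96 + 1 = 97.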
+
+void NERemapKernel::remap_nearest(const Window &window)
+{
+    // Don't increment in the X and Y directions for the input tensor:
+    // a pointer to the start of this plane is needed as the base for the precomputed offsets
+    Window win_in(window);
+    win_in.set(Window::DimX, Window::Dimension(0, 0, 0));
+    win_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+    Iterator in(_input, win_in);
+    Iterator out(_output, window);
+    Iterator mapx(_map_x, window);
+    Iterator mapy(_map_y, window);
+
+    const float32x4_t width     = vdupq_n_f32(static_cast<float>(_input->info()->dimension(0)));
+    const float32x4_t height    = vdupq_n_f32(static_cast<float>(_input->info()->dimension(1)));
+    const int32x4_t   in_stride = vdupq_n_s32(static_cast<int32_t>(_input->info()->strides_in_bytes()[1]));
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        const auto     mapx_ptr = reinterpret_cast<const float *>(mapx.ptr());
+        const auto     mapy_ptr = reinterpret_cast<const float *>(mapy.ptr());
+        const uint8_t *in_ptr   = in.ptr();
+
+        const int32x4_t offset0 = offset_nearest_interpolation(mapx_ptr + 0, mapy_ptr + 0, width, height, in_stride);
+        const int32x4_t offset1 = offset_nearest_interpolation(mapx_ptr + 4, mapy_ptr + 4, width, height, in_stride);
+        const int32x4_t offset2 = offset_nearest_interpolation(mapx_ptr + 8, mapy_ptr + 8, width, height, in_stride);
+        const int32x4_t offset3 = offset_nearest_interpolation(mapx_ptr + 12, mapy_ptr + 12, width, height, in_stride);
+
+        uint8x16_t tmp = vdupq_n_u8(0);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset0, 0)], tmp, 0);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset0, 1)], tmp, 1);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset0, 2)], tmp, 2);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset0, 3)], tmp, 3);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset1, 0)], tmp, 4);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset1, 1)], tmp, 5);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset1, 2)], tmp, 6);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset1, 3)], tmp, 7);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset2, 0)], tmp, 8);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset2, 1)], tmp, 9);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset2, 2)], tmp, 10);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset2, 3)], tmp, 11);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset3, 0)], tmp, 12);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset3, 1)], tmp, 13);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset3, 2)], tmp, 14);
+        tmp            = vsetq_lane_u8(in_ptr[vgetq_lane_s32(offset3, 3)], tmp, 15);
+        vst1q_u8(out.ptr(), tmp);
+    },
+    in, out, mapx, mapy);
+}
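The sixteen vsetq_lane_u8() calls above are a hand-unrolled gather (Neon has no byte-gather instruction). A scalar sketch of the same step, where offsets[] stands for the lanes of offset0..offset3 flattened in order (an illustrative name, not from the patch):

    #include <cstdint>

    // Scalar equivalent of the 16-lane gather: each output pixel is read
    // from the input plane at its precomputed byte offset.
    inline void gather16(const uint8_t *in_ptr, const int32_t offsets[16], uint8_t *out_ptr)
    {
        for(int i = 0; i < 16; ++i)
        {
            out_ptr[i] = in_ptr[offsets[i]];
        }
    }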
+
+void NERemapKernel::remap_bilinear(const Window &window)
+{
+    using namespace scale_helpers;
+
+    // Don't increment in the X and Y directions for the input tensor:
+    // a pointer to the start of this plane is needed as the base for the precomputed offsets
+    Window win_in(window);
+    win_in.set(Window::DimX, Window::Dimension(0, 0, 0));
+    win_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+    Iterator in(_input, win_in);
+    Iterator out(_output, window);
+    Iterator mapx(_map_x, window);
+    Iterator mapy(_map_y, window);
+
+    const size_t width     = _input->info()->dimension(0);
+    const size_t height    = _input->info()->dimension(1);
+    const size_t in_stride = _input->info()->strides_in_bytes()[1];
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        const auto     mapx_ptr = reinterpret_cast<const float *>(mapx.ptr());
+        const auto     mapy_ptr = reinterpret_cast<const float *>(mapy.ptr());
+        const uint8_t *in_ptr   = in.ptr();
+
+        uint8x8_t tmp0 = vdup_n_u8(0);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[0], mapy_ptr[0]), tmp0, 0);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[1], mapy_ptr[1]), tmp0, 1);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[2], mapy_ptr[2]), tmp0, 2);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[3], mapy_ptr[3]), tmp0, 3);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[4], mapy_ptr[4]), tmp0, 4);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[5], mapy_ptr[5]), tmp0, 5);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[6], mapy_ptr[6]), tmp0, 6);
+        tmp0           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[7], mapy_ptr[7]), tmp0, 7);
+
+        uint8x8_t tmp1 = vdup_n_u8(0);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[8], mapy_ptr[8]), tmp1, 0);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[9], mapy_ptr[9]), tmp1, 1);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[10], mapy_ptr[10]), tmp1, 2);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[11], mapy_ptr[11]), tmp1, 3);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[12], mapy_ptr[12]), tmp1, 4);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[13], mapy_ptr[13]), tmp1, 5);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[14], mapy_ptr[14]), tmp1, 6);
+        tmp1           = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride, width, height, mapx_ptr[15], mapy_ptr[15]), tmp1, 7);
+
+        vst1q_u8(out.ptr(), vcombine_u8(tmp0, tmp1));
+    },
+    in, out, mapx, mapy);
+}
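pixel_bilinear_c1_clamp() comes from src/core/helpers/ScaleHelpers.h and is not shown in this patch. As an illustration only, single-channel bilinear sampling reduces to a distance-weighted blend of the four neighbouring pixels; a sketch along those lines (not the library's exact helper):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Illustrative single-channel bilinear sample at (x, y): clamp the
    // coordinates to the valid range, then blend the four surrounding
    // pixels by their fractional distances.
    inline uint8_t bilinear_c1_sketch(const uint8_t *in, size_t stride, size_t width, size_t height, float x, float y)
    {
        x = std::max(0.f, std::min(x, static_cast<float>(width - 1)));
        y = std::max(0.f, std::min(y, static_cast<float>(height - 1)));

        const size_t x0 = static_cast<size_t>(x);
        const size_t y0 = static_cast<size_t>(y);
        const size_t x1 = std::min(x0 + 1, width - 1);
        const size_t y1 = std::min(y0 + 1, height - 1);
        const float  dx = x - x0;
        const float  dy = y - y0;

        const float top    = (1.f - dx) * in[y0 * stride + x0] + dx * in[y0 * stride + x1];
        const float bottom = (1.f - dx) * in[y1 * stride + x0] + dx * in[y1 * stride + x1];
        return static_cast<uint8_t>((1.f - dy) * top + dy * bottom);
    }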
+
+void NERemapKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(_func == nullptr);
+
+    (this->*_func)(window);
+}
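The (this->*_func)(window) call dispatches through the pointer-to-member selected once in configure(), so run() itself stays branch-free. The pattern in isolation, with toy names for illustration:

    // Toy reduction of the dispatch pattern used above.
    struct Dispatcher
    {
        void (Dispatcher::*fn)(int) = nullptr; // set once at configure time
        void handler(int) {}
        void run(int v)
        {
            (this->*fn)(v); // same shape as (this->*_func)(window)
        }
    };
    // Usage: Dispatcher d; d.fn = &Dispatcher::handler; d.run(42);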
diff --git a/src/core/NEON/kernels/NERemapKernel.h b/src/core/NEON/kernels/NERemapKernel.h
new file mode 100644
index 0000000..adc7f4b
--- /dev/null
+++ b/src/core/NEON/kernels/NERemapKernel.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_NEREMAPKERNEL_H
+#define ARM_COMPUTE_NEREMAPKERNEL_H
+
+#include "arm_compute/core/Types.h"
+#include "src/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Neon kernel to perform a remap on a tensor */
+class NERemapKernel : public INEKernel
+{
+public:
+    const char *name() const override
+    {
+        return "NERemapKernel";
+    }
+    /** Default constructor */
+    NERemapKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NERemapKernel(const NERemapKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NERemapKernel &operator=(const NERemapKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    NERemapKernel(NERemapKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    NERemapKernel &operator=(NERemapKernel &&) = default;
+    /** Default destructor */
+    ~NERemapKernel() = default;
+
+    /** Initialize the kernel's inputs, output and interpolation policy.
+     *
+     * @param[in]  input  Source tensor. Data type supported: U8.
+     * @param[in]  map_x  Map for X coordinates. Data type supported: F32.
+     * @param[in]  map_y  Map for Y coordinates. Data type supported: F32.
+     * @param[out] output Destination tensor. Data type supported: U8. All but the lowest two dimensions must be the same size as in the input tensor, i.e. remapping is only performed within the XY-plane.
+     * @param[in]  policy The interpolation type.
+     */
+    void configure(const ITensor *input, const ITensor *map_x, const ITensor *map_y, ITensor *output, InterpolationPolicy policy);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+    BorderSize border_size() const override;
+
+private:
+    /** Function to perform nearest interpolation on the given window */
+    void remap_nearest(const Window &window);
+    /** Function to perform bilinear interpolation on the given window */
+    void remap_bilinear(const Window &window);
+    /** Remap function to use for the particular interpolation type passed to configure() */
+    void (NERemapKernel::*_func)(const Window &window);
+
+    const ITensor *_input;  /**< Input image */
+    ITensor       *_output; /**< Output image */
+    const ITensor *_map_x;  /**< Input remap x coordinates */
+    const ITensor *_map_y;  /**< Input remap y coordinates */
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_NEREMAPKERNEL_H */
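A minimal usage sketch of the class above. Assumptions: the tensors are already allocated and filled, and the input's one-pixel border has been populated beforehand (as the NERemap runtime function would arrange); run_remap is an illustrative wrapper, not library API:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/NEScheduler.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/core/NEON/kernels/NERemapKernel.h"

    using namespace arm_compute;

    // Remap a U8 image through F32 coordinate maps with bilinear sampling.
    void run_remap(Tensor &src, Tensor &map_x, Tensor &map_y, Tensor &dst)
    {
        NERemapKernel kernel;
        kernel.configure(&src, &map_x, &map_y, &dst, InterpolationPolicy::BILINEAR);
        NEScheduler::get().schedule(&kernel, Window::DimY);
    }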