Add quantized support for unary elementwise operations in CPU

* Add quantized unary elementwise operations in CPU using a look-up table (LUT).
* Widen the input data range of the test suite.
  - Fix the overflow/underflow range of the CPU exponential function.
  - Fix a saturation issue in the CL round operator.

Resolves: COMPMID-5763
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I41445de2b4a33ec6b01e0ab701516c240c852d0b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9367
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
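
For context, the quantized path added below works by precomputing a 256-entry table at configure time: every possible input byte is dequantized, pushed through the operator in float, clamped to the representable output range, and requantized. A minimal scalar sketch of the idea, with hypothetical helper names (the library's actual implementation is q8_prepare_lut() in the diff below):

```cpp
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>

struct UniformQuantInfo { float scale; int32_t offset; };

// Build a 256-entry EXP table for QASYMM8 (unsigned) data.
std::array<uint8_t, 256> build_exp_lut(UniformQuantInfo src, UniformQuantInfo dst)
{
    std::array<uint8_t, 256> lut{};
    for(int q = 0; q < 256; ++q)
    {
        const float x = src.scale * (q - src.offset);                // dequantize
        const float y = std::exp(x);                                 // the unary op
        const int   r = static_cast<int>(std::lround(y / dst.scale)) + dst.offset;
        lut[q]        = static_cast<uint8_t>(std::clamp(r, 0, 255)); // requantize + saturate
    }
    return lut;
}
```

At run time each element then costs a single lookup, dst[i] = lut[src[i]]; the NEON and SVE micro-kernels below only vectorize that final substitution step.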
diff --git a/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp b/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp
index 335de78..0adf28a 100644
--- a/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp
+++ b/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,6 +26,7 @@
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "src/core/CPP/Validate.h"
 #include "src/core/common/Registrars.h"
@@ -42,6 +43,72 @@
 {
 namespace
 {
+#ifdef __aarch64__
+
+std::unique_ptr<uint8_t[]> q8_prepare_lut(ElementWiseUnary op, const ITensorInfo *src, const ITensorInfo *dst)
+{
+    ARM_COMPUTE_ERROR_ON(src->data_type() != dst->data_type());
+    ARM_COMPUTE_ERROR_ON(!is_data_type_quantized(src->data_type()));
+    ARM_COMPUTE_ERROR_ON(src->element_size() != 1);
+
+    auto lut = std::unique_ptr<uint8_t[]>(new uint8_t[256]);
+    const auto is_signed = src->data_type() == DataType::QASYMM8_SIGNED;
+    const auto src_qi = src->quantization_info().uniform();
+    const auto dst_qi = dst->quantization_info().uniform();
+
+    const auto dst_min_fp = (((is_signed) ? -128 : 0) - dst_qi.offset) * dst_qi.scale;
+    const auto dst_max_fp = (((is_signed) ? 127 : 255) - dst_qi.offset) * dst_qi.scale;
+
+    for(int i = 0; i < 256; ++i)
+    {
+        const auto in = (is_signed) ? dequantize_qasymm8_signed(static_cast<int8_t>(i), src_qi) : dequantize_qasymm8(i, src_qi);
+        float result = 0;
+
+        switch(op)
+        {
+            case ElementWiseUnary::RSQRT:
+                result = 1 / std::sqrt(in);
+                break;
+
+            case ElementWiseUnary::EXP:
+                result = std::exp(in);
+                break;
+
+            case ElementWiseUnary::NEG:
+                result = -in;
+                break;
+
+            case ElementWiseUnary::LOG:
+                result = std::log(in);
+                break;
+
+            case ElementWiseUnary::ABS:
+                result = std::abs(in);
+                break;
+
+            case ElementWiseUnary::ROUND:
+                result = support::cpp11::nearbyint(in);
+                break;
+
+            case ElementWiseUnary::SIN:
+                result = std::sin(in);
+                break;
+
+            default:
+                ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+        }
+
+        result = utility::clamp(result, dst_min_fp, dst_max_fp);
+
+        const auto out = (is_signed) ? static_cast<uint8_t>(quantize_qasymm8_signed(result, dst_qi)) : quantize_qasymm8(result, dst_qi);
+        lut[i] = out;
+    }
+
+    return lut;
+}
+
+#endif // __aarch64__
+
 static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> available_kernels =
 {
     {
@@ -50,7 +117,8 @@
         {
             return (data.dt == DataType::F32 && data.isa.sve);
         },
-        REGISTER_FP32_SVE(sve_fp32_elementwise_unary)
+        REGISTER_FP32_SVE(sve_fp32_elementwise_unary),
+        nullptr,
     },
     {
         "sve_fp16_elementwise_unary",
@@ -59,6 +127,7 @@
             return (data.dt == DataType::F16 && data.isa.sve && data.isa.fp16);
         },
         REGISTER_FP16_SVE(sve_fp16_elementwise_unary),
+        nullptr,
     },
     {
         "sve_s32_elementwise_unary",
@@ -67,6 +136,7 @@
             return (data.dt == DataType::S32 && data.isa.sve);
         },
         REGISTER_INTEGER_SVE(sve_s32_elementwise_unary),
+        nullptr,
     },
     {
         "neon_fp32_elementwise_unary",
@@ -75,6 +145,7 @@
             return data.dt == DataType::F32;
         },
         REGISTER_FP32_NEON(neon_fp32_elementwise_unary),
+        nullptr,
     },
     {
         "neon_fp16_elementwise_unary",
@@ -83,6 +154,7 @@
             return data.dt == DataType::F16 && data.isa.fp16;
         },
         REGISTER_FP16_NEON(neon_fp16_elementwise_unary),
+        nullptr,
     },
     {
         "neon_s32_elementwise_unary",
@@ -91,7 +163,28 @@
             return data.dt == DataType::S32;
         },
         REGISTER_INTEGER_NEON(neon_s32_elementwise_unary),
+        nullptr,
     },
+#ifdef __aarch64__
+    {
+        "sve_q8_elementwise_unary",
+        [](const DataTypeISASelectorData & data)
+        {
+            return (data.dt == DataType::QASYMM8 || data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve;
+        },
+        REGISTER_QASYMM8_SVE(sve_q8_elementwise_unary),
+        &q8_prepare_lut,
+    },
+    {
+        "neon_q8_elementwise_unary",
+        [](const DataTypeISASelectorData & data)
+        {
+            return data.dt == DataType::QASYMM8 || data.dt == DataType::QASYMM8_SIGNED;
+        },
+        REGISTER_QASYMM8_NEON(neon_q8_elementwise_unary),
+        &q8_prepare_lut,
+    },
+#endif // __aarch64__
 };
 
 } // namespace
@@ -112,6 +205,11 @@
         return;
     }
 
+    if(uk->prepare_func != nullptr)
+    {
+        _lut = uk->prepare_func(op, &src, &dst);
+    }
+
     auto shape_and_window = compute_output_shape_and_window(src.tensor_shape());
     auto_init_if_empty(dst, shape_and_window.first, 1, src.data_type());
     ICpuKernel::configure(shape_and_window.second);
@@ -132,11 +230,11 @@
         case ElementWiseUnary::LOG:
         case ElementWiseUnary::ROUND:
         case ElementWiseUnary::SIN:
-            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32);
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
             break;
         case ElementWiseUnary::NEG:
         case ElementWiseUnary::ABS:
-            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::S32);
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::S32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
             break;
         default:
             ARM_COMPUTE_ERROR("ElementWiseUnary operation not supported");
@@ -157,7 +255,7 @@
     auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
     auto dst = tensors.get_tensor(TensorType::ACL_DST);
 
-    _run_method(src, dst, window, _op);
+    _run_method(src, dst, window, _op, _lut.get());
 }
 
 const char *CpuElementwiseUnaryKernel::name() const
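
To make the float-side clamp in q8_prepare_lut() concrete, here is one worked table entry for EXP with made-up quantization parameters (QASYMM8_SIGNED):

```cpp
// src: scale = 0.05, offset = 0        dst: scale = 2.0, offset = -120
// entry i = 120:
//   in         = 0.05 * (120 - 0)          =   6.0
//   exp(in)                                = 403.43
//   dst_max_fp = (127 - (-120)) * 2.0      = 494.0  -> no clamping needed
//   out        = round(403.43 / 2.0) - 120 =  82
// For slightly larger inputs exp() exceeds dst_max_fp, and the clamp pins
// the entry to the top of the quantized range instead of letting the
// requantization wrap around.
```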
diff --git a/src/cpu/kernels/CpuElementwiseUnaryKernel.h b/src/cpu/kernels/CpuElementwiseUnaryKernel.h
index 138049a..00188f0 100644
--- a/src/cpu/kernels/CpuElementwiseUnaryKernel.h
+++ b/src/cpu/kernels/CpuElementwiseUnaryKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -42,7 +42,8 @@
 class CpuElementwiseUnaryKernel : public ICpuKernel<CpuElementwiseUnaryKernel>
 {
 private:
-    using ElementwiseUnaryUkernelPtr = std::add_pointer<void(const ITensor *, ITensor *, const Window &, ElementWiseUnary)>::type;
+    using ElementwiseUnaryUkernelPtr = std::add_pointer<void(const ITensor *, ITensor *, const Window &, ElementWiseUnary, const uint8_t *)>::type;
+    using ElementwiseUnaryPreparePtr = std::add_pointer<std::unique_ptr<uint8_t[]>(ElementWiseUnary op, const ITensorInfo *, const ITensorInfo *)>::type;
 
 public:
     CpuElementwiseUnaryKernel() = default;
@@ -72,6 +73,7 @@
         const char                  *name;
         const DataTypeISASelectorPtr is_selected;
         ElementwiseUnaryUkernelPtr   ukernel;
+        ElementwiseUnaryPreparePtr   prepare_func;
     };
 
     static const std::vector<ElementwiseUnaryKernel> &get_available_kernels();
@@ -80,6 +82,7 @@
     ElementWiseUnary           _op{};
     ElementwiseUnaryUkernelPtr _run_method{ nullptr };
     std::string                _name{};
+    std::unique_ptr<uint8_t[]> _lut{};
 };
 } // namespace kernels
 } // namespace cpu
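
The header now pairs each micro-kernel with an optional configure-time hook: ElementwiseUnaryUkernelPtr consumes a prebuilt table at run time, while ElementwiseUnaryPreparePtr produces it once. A hypothetical new entry would have to satisfy both signatures (names here are illustrative only):

```cpp
// Must match ElementwiseUnaryPreparePtr: build the 256-byte table once.
std::unique_ptr<uint8_t[]> my_prepare(ElementWiseUnary op, const ITensorInfo *src, const ITensorInfo *dst);

// Must match ElementwiseUnaryUkernelPtr: consume the table on every run.
void my_ukernel(const ITensor *in, ITensor *out, const Window &win, ElementWiseUnary op, const uint8_t *lut);

// Registered as: { "my_q8_elementwise_unary", my_selector, my_ukernel, &my_prepare }
// Float and integer entries leave prepare_func as nullptr and receive lut == nullptr.
```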
diff --git a/src/cpu/kernels/activation/generic/neon/lut.cpp b/src/cpu/kernels/activation/generic/neon/lut.cpp
index 8ceb7d8..90690ff 100644
--- a/src/cpu/kernels/activation/generic/neon/lut.cpp
+++ b/src/cpu/kernels/activation/generic/neon/lut.cpp
@@ -23,394 +23,12 @@
  */
 
 #include "arm_compute/core/Helpers.h"
-
-#include <arm_neon.h>
-#include <cstdint>
+#include "src/cpu/kernels/lut/list.h"
 
 namespace arm_compute
 {
 namespace cpu
 {
-namespace
-{
-#ifdef __aarch64__
-
-void substitute_bytes_neon(
-    const uint8_t        *table,
-    size_t                num_strings,
-    size_t                string_length,
-    const uint8_t *const *input,
-    uint8_t *const       *output)
-{
-    __asm__ __volatile__(
-        "ldr q16, [%x[table], #0x0]\n"
-        "ldr q17, [%x[table], #0x10]\n"
-        "mov x23, #0x0\n"
-        "ldr q18, [%x[table], #0x20]\n"
-        "ldr q19, [%x[table], #0x30]\n"
-        "ldr q20, [%x[table], #0x40]\n"
-        "ldr q21, [%x[table], #0x50]\n"
-        "ldr q22, [%x[table], #0x60]\n"
-        "ldr q23, [%x[table], #0x70]\n"
-        "ldr q24, [%x[table], #0x80]\n"
-        "ldr q25, [%x[table], #0x90]\n"
-        "ldr q26, [%x[table], #0xa0]\n"
-        "ldr q27, [%x[table], #0xb0]\n"
-        "ldr q28, [%x[table], #0xc0]\n"
-        "ldr q29, [%x[table], #0xd0]\n"
-        "ldr q30, [%x[table], #0xe0]\n"
-        "ldr q31, [%x[table], #0xf0]\n"
-        "1:" // string loop
-        "ldr x22, [%x[input], x23, LSL #0x3]\n"
-        "ldr x21, [%x[output], x23, LSL #0x3]\n"
-        "movi v11.16b, #0x40\n"
-        "movi v10.16b, #0x80\n"
-        "movi v9.16b, #0xc0\n"
-        "mov x20, %x[string_length]\n"
-        "2:" // 4 rounds: width loop
-        "cmp x20, #0x30\n"
-        "bge 27f\n"
-        "tbz x20, #5, 10f\n"
-        "ld1 { v8.16b }, [x22], #0x10\n"
-        "ld1 { v13.16b }, [x22], #0x10\n"
-        "tbz x20, #3, 6f\n"
-        "ldr d12, [x22], #0x8\n"
-        "tbz x20, #2, 4f\n"
-        "ld1 { v12.s }[2], [x22], #0x4\n"
-        "tbz x20, #1, 3f\n"
-        "ld1 { v12.h }[6], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[14], [x22]\n"
-        "b 26f\n"
-        "3:" // 4 rounds: Partial load: partial_1_44
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[12], [x22]\n"
-        "b 26f\n"
-        "4:" // 4 rounds: Partial load: partial_2_40
-        "tbz x20, #1, 5f\n"
-        "ld1 { v12.h }[4], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[10], [x22]\n"
-        "b 26f\n"
-        "5:" // 4 rounds: Partial load: partial_1_40
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[8], [x22]\n"
-        "b 26f\n"
-        "6:" // 4 rounds: Partial load: partial_4_32
-        "tbz x20, #2, 8f\n"
-        "ldr s12, [x22], #0x4\n"
-        "tbz x20, #1, 7f\n"
-        "ld1 { v12.h }[2], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[6], [x22]\n"
-        "b 26f\n"
-        "7:" // 4 rounds: Partial load: partial_1_36
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[4], [x22]\n"
-        "b 26f\n"
-        "8:" // 4 rounds: Partial load: partial_2_32
-        "tbz x20, #1, 9f\n"
-        "ldr h12, [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v12.b }[2], [x22]\n"
-        "b 26f\n"
-        "9:" // 4 rounds: Partial load: partial_1_32
-        "tbz x20, #0, 26f\n"
-        "ldr b12, [x22, #0x0]\n"
-        "b 26f\n"
-        "10:" // 4 rounds: Partial load: partial_16_0
-        "tbz x20, #4, 18f\n"
-        "ld1 { v8.16b }, [x22], #0x10\n"
-        "tbz x20, #3, 14f\n"
-        "ldr d13, [x22], #0x8\n"
-        "tbz x20, #2, 12f\n"
-        "ld1 { v13.s }[2], [x22], #0x4\n"
-        "tbz x20, #1, 11f\n"
-        "ld1 { v13.h }[6], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[14], [x22]\n"
-        "b 26f\n"
-        "11:" // 4 rounds: Partial load: partial_1_28
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[12], [x22]\n"
-        "b 26f\n"
-        "12:" // 4 rounds: Partial load: partial_2_24
-        "tbz x20, #1, 13f\n"
-        "ld1 { v13.h }[4], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[10], [x22]\n"
-        "b 26f\n"
-        "13:" // 4 rounds: Partial load: partial_1_24
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[8], [x22]\n"
-        "b 26f\n"
-        "14:" // 4 rounds: Partial load: partial_4_16
-        "tbz x20, #2, 16f\n"
-        "ldr s13, [x22], #0x4\n"
-        "tbz x20, #1, 15f\n"
-        "ld1 { v13.h }[2], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[6], [x22]\n"
-        "b 26f\n"
-        "15:" // 4 rounds: Partial load: partial_1_20
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[4], [x22]\n"
-        "b 26f\n"
-        "16:" // 4 rounds: Partial load: partial_2_16
-        "tbz x20, #1, 17f\n"
-        "ldr h13, [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v13.b }[2], [x22]\n"
-        "b 26f\n"
-        "17:" // 4 rounds: Partial load: partial_1_16
-        "tbz x20, #0, 26f\n"
-        "ldr b13, [x22, #0x0]\n"
-        "b 26f\n"
-        "18:" // 4 rounds: Partial load: partial_8_0
-        "tbz x20, #3, 22f\n"
-        "ldr d8, [x22], #0x8\n"
-        "tbz x20, #2, 20f\n"
-        "ld1 { v8.s }[2], [x22], #0x4\n"
-        "tbz x20, #1, 19f\n"
-        "ld1 { v8.h }[6], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[14], [x22]\n"
-        "b 26f\n"
-        "19:" // 4 rounds: Partial load: partial_1_12
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[12], [x22]\n"
-        "b 26f\n"
-        "20:" // 4 rounds: Partial load: partial_2_8
-        "tbz x20, #1, 21f\n"
-        "ld1 { v8.h }[4], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[10], [x22]\n"
-        "b 26f\n"
-        "21:" // 4 rounds: Partial load: partial_1_8
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[8], [x22]\n"
-        "b 26f\n"
-        "22:" // 4 rounds: Partial load: partial_4_0
-        "tbz x20, #2, 24f\n"
-        "ldr s8, [x22], #0x4\n"
-        "tbz x20, #1, 23f\n"
-        "ld1 { v8.h }[2], [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[6], [x22]\n"
-        "b 26f\n"
-        "23:" // 4 rounds: Partial load: partial_1_4
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[4], [x22]\n"
-        "b 26f\n"
-        "24:" // 4 rounds: Partial load: partial_2_0
-        "tbz x20, #1, 25f\n"
-        "ldr h8, [x22], #0x2\n"
-        "tbz x20, #0, 26f\n"
-        "ld1 { v8.b }[2], [x22]\n"
-        "b 26f\n"
-        "25:" // 4 rounds: Partial load: partial_1_0
-        "ldr b8, [x22, #0x0]\n"
-        "26:" // 4 rounds: Partial load: Done
-        "b 28f\n"
-        "27:" // 4 rounds: Full load
-        "ldr q8, [x22, #0x0]\n"
-        "ldr q13, [x22, #0x10]\n"
-        "ldr q12, [x22, #0x20]\n"
-        "add x22, x22, #0x30\n"
-        "28:" // 4 rounds: Load done
-        "sub v0.16b, v8.16b, v11.16b\n"
-        "sub v7.16b, v8.16b, v10.16b\n"
-        "tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b\n"
-        "sub v6.16b, v8.16b, v9.16b\n"
-        "sub v5.16b, v13.16b, v11.16b\n"
-        "tbl v8.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v8.16b\n"
-        "sub v4.16b, v13.16b, v10.16b\n"
-        "sub v3.16b, v13.16b, v9.16b\n"
-        "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
-        "sub v2.16b, v12.16b, v11.16b\n"
-        "sub v1.16b, v12.16b, v10.16b\n"
-        "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
-        "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
-        "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
-        "orr v8.16b, v8.16b, v0.16b\n"
-        "sub v0.16b, v12.16b, v9.16b\n"
-        "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
-        "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
-        "tbl v12.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v12.16b\n"
-        "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
-        "orr v7.16b, v7.16b, v6.16b\n"
-        "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
-        "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
-        "orr v13.16b, v13.16b, v5.16b\n"
-        "orr v4.16b, v4.16b, v3.16b\n"
-        "orr v12.16b, v12.16b, v2.16b\n"
-        "cmp x20, #0x30\n"
-        "orr v1.16b, v1.16b, v0.16b\n"
-        "orr v8.16b, v8.16b, v7.16b\n"
-        "orr v13.16b, v13.16b, v4.16b\n"
-        "orr v12.16b, v12.16b, v1.16b\n"
-        "bge 53f\n"
-        "tbz x20, #5, 36f\n"
-        "st1 { v8.16b }, [x21], #0x10\n"
-        "st1 { v13.16b }, [x21], #0x10\n"
-        "tbz x20, #3, 32f\n"
-        "str d12, [x21], #0x8\n"
-        "tbz x20, #2, 30f\n"
-        "st1 { v12.s }[2], [x21], #0x4\n"
-        "tbz x20, #1, 29f\n"
-        "st1 { v12.h }[6], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[14], [x21]\n"
-        "b 52f\n"
-        "29:" // 4 rounds: Partial writeback: partial_1_44
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[12], [x21]\n"
-        "b 52f\n"
-        "30:" // 4 rounds: Partial writeback: partial_2_40
-        "tbz x20, #1, 31f\n"
-        "st1 { v12.h }[4], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[10], [x21]\n"
-        "b 52f\n"
-        "31:" // 4 rounds: Partial writeback: partial_1_40
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[8], [x21]\n"
-        "b 52f\n"
-        "32:" // 4 rounds: Partial writeback: partial_4_32
-        "tbz x20, #2, 34f\n"
-        "str s12, [x21], #0x4\n"
-        "tbz x20, #1, 33f\n"
-        "st1 { v12.h }[2], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[6], [x21]\n"
-        "b 52f\n"
-        "33:" // 4 rounds: Partial writeback: partial_1_36
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[4], [x21]\n"
-        "b 52f\n"
-        "34:" // 4 rounds: Partial writeback: partial_2_32
-        "tbz x20, #1, 35f\n"
-        "str h12, [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v12.b }[2], [x21]\n"
-        "b 52f\n"
-        "35:" // 4 rounds: Partial writeback: partial_1_32
-        "tbz x20, #0, 52f\n"
-        "str b12, [x21, #0x0]\n"
-        "b 52f\n"
-        "36:" // 4 rounds: Partial writeback: partial_16_0
-        "tbz x20, #4, 44f\n"
-        "st1 { v8.16b }, [x21], #0x10\n"
-        "tbz x20, #3, 40f\n"
-        "str d13, [x21], #0x8\n"
-        "tbz x20, #2, 38f\n"
-        "st1 { v13.s }[2], [x21], #0x4\n"
-        "tbz x20, #1, 37f\n"
-        "st1 { v13.h }[6], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[14], [x21]\n"
-        "b 52f\n"
-        "37:" // 4 rounds: Partial writeback: partial_1_28
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[12], [x21]\n"
-        "b 52f\n"
-        "38:" // 4 rounds: Partial writeback: partial_2_24
-        "tbz x20, #1, 39f\n"
-        "st1 { v13.h }[4], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[10], [x21]\n"
-        "b 52f\n"
-        "39:" // 4 rounds: Partial writeback: partial_1_24
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[8], [x21]\n"
-        "b 52f\n"
-        "40:" // 4 rounds: Partial writeback: partial_4_16
-        "tbz x20, #2, 42f\n"
-        "str s13, [x21], #0x4\n"
-        "tbz x20, #1, 41f\n"
-        "st1 { v13.h }[2], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[6], [x21]\n"
-        "b 52f\n"
-        "41:" // 4 rounds: Partial writeback: partial_1_20
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[4], [x21]\n"
-        "b 52f\n"
-        "42:" // 4 rounds: Partial writeback: partial_2_16
-        "tbz x20, #1, 43f\n"
-        "str h13, [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v13.b }[2], [x21]\n"
-        "b 52f\n"
-        "43:" // 4 rounds: Partial writeback: partial_1_16
-        "tbz x20, #0, 52f\n"
-        "str b13, [x21, #0x0]\n"
-        "b 52f\n"
-        "44:" // 4 rounds: Partial writeback: partial_8_0
-        "tbz x20, #3, 48f\n"
-        "str d8, [x21], #0x8\n"
-        "tbz x20, #2, 46f\n"
-        "st1 { v8.s }[2], [x21], #0x4\n"
-        "tbz x20, #1, 45f\n"
-        "st1 { v8.h }[6], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[14], [x21]\n"
-        "b 52f\n"
-        "45:" // 4 rounds: Partial writeback: partial_1_12
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[12], [x21]\n"
-        "b 52f\n"
-        "46:" // 4 rounds: Partial writeback: partial_2_8
-        "tbz x20, #1, 47f\n"
-        "st1 { v8.h }[4], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[10], [x21]\n"
-        "b 52f\n"
-        "47:" // 4 rounds: Partial writeback: partial_1_8
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[8], [x21]\n"
-        "b 52f\n"
-        "48:" // 4 rounds: Partial writeback: partial_4_0
-        "tbz x20, #2, 50f\n"
-        "str s8, [x21], #0x4\n"
-        "tbz x20, #1, 49f\n"
-        "st1 { v8.h }[2], [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[6], [x21]\n"
-        "b 52f\n"
-        "49:" // 4 rounds: Partial writeback: partial_1_4
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[4], [x21]\n"
-        "b 52f\n"
-        "50:" // 4 rounds: Partial writeback: partial_2_0
-        "tbz x20, #1, 51f\n"
-        "str h8, [x21], #0x2\n"
-        "tbz x20, #0, 52f\n"
-        "st1 { v8.b }[2], [x21]\n"
-        "b 52f\n"
-        "51:" // 4 rounds: Partial writeback: partial_1_0
-        "str b8, [x21, #0x0]\n"
-        "52:" // 4 rounds: Partial writeback: Done
-        "b 54f\n"
-        "53:" // 4 rounds: Full writeback
-        "str q8, [x21, #0x0]\n"
-        "str q13, [x21, #0x10]\n"
-        "str q12, [x21, #0x20]\n"
-        "add x21, x21, #0x30\n"
-        "54:" // 4 rounds: Writeback done
-        "subs x20, x20, #0x30\n"
-        "bgt 2b\n"
-        "add x23, x23, #0x1\n"
-        "cmp x23, %x[num_strings]\n"
-        "bne 1b\n"
-        :
-        : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
-        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23");
-}
-
-#endif // __aarch64__
-} // namespace
-
 #ifdef __aarch64__
 void neon_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
 {
@@ -424,7 +42,7 @@
     {
         const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
         auto       output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-        substitute_bytes_neon(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
+        lut_u8_neon(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
     },
     input, output);
 }
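
The removed assembly now lives in src/cpu/kernels/lut/ as lut_u8_neon, shared between activation and elementwise-unary. Its core trick is decomposing the 256-byte table into four 64-byte TBL lookups. A simplified AArch64 intrinsics sketch of the same scheme (a model of the approach, not the tuned routine above):

```cpp
#include <arm_neon.h>
#include <cstddef>
#include <cstdint>

void lut_u8_neon_sketch(const uint8_t *table, const uint8_t *in, uint8_t *out, size_t n)
{
    // Four 4-register groups of 16 bytes each cover the whole 256-byte table.
    const uint8x16x4_t t0 = vld1q_u8_x4(table);
    const uint8x16x4_t t1 = vld1q_u8_x4(table + 64);
    const uint8x16x4_t t2 = vld1q_u8_x4(table + 128);
    const uint8x16x4_t t3 = vld1q_u8_x4(table + 192);

    size_t i = 0;
    for(; i + 16 <= n; i += 16)
    {
        const uint8x16_t idx = vld1q_u8(in + i);
        // TBL yields 0 for out-of-range indices, so rebasing the index by 64
        // per group and OR-ing the partial results selects exactly one table
        // segment per byte.
        uint8x16_t r = vqtbl4q_u8(t0, idx);
        r            = vorrq_u8(r, vqtbl4q_u8(t1, vsubq_u8(idx, vdupq_n_u8(64))));
        r            = vorrq_u8(r, vqtbl4q_u8(t2, vsubq_u8(idx, vdupq_n_u8(128))));
        r            = vorrq_u8(r, vqtbl4q_u8(t3, vsubq_u8(idx, vdupq_n_u8(192))));
        vst1q_u8(out + i, r);
    }
    for(; i < n; ++i)
    {
        out[i] = table[in[i]]; // scalar tail
    }
}
```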
diff --git a/src/cpu/kernels/activation/generic/sve/lut.cpp b/src/cpu/kernels/activation/generic/sve/lut.cpp
index b73c87e..b404266 100644
--- a/src/cpu/kernels/activation/generic/sve/lut.cpp
+++ b/src/cpu/kernels/activation/generic/sve/lut.cpp
@@ -23,627 +23,12 @@
  */
 
 #include "arm_compute/core/Helpers.h"
-
-#include <arm_neon.h>
-#include <cstdint>
+#include "src/cpu/kernels/lut/list.h"
 
 namespace arm_compute
 {
 namespace cpu
 {
-namespace
-{
-#ifdef __aarch64__
-void substitute_bytes_sve(
-    const uint8_t        *table,
-    size_t                num_strings,
-    size_t                string_length,
-    const uint8_t *const *input,
-    uint8_t *const       *output)
-{
-    __asm__ __volatile__(
-        "ptrue p0.b\n"
-        "cntd x25\n"
-        "addvl %x[table], %x[table], #8\n"
-        "ld1b { z16.b }, p0/Z, [%x[table], #-8, MUL VL]\n"
-        "tbnz x25, #5, 1f\n"
-        "ld1b { z17.b }, p0/Z, [%x[table], #-7, MUL VL]\n"
-        "tbnz x25, #4, 1f\n"
-        "ld1b { z18.b }, p0/Z, [%x[table], #-6, MUL VL]\n"
-        "ld1b { z19.b }, p0/Z, [%x[table], #-5, MUL VL]\n"
-        "tbnz x25, #3, 1f\n"
-        "ld1b { z20.b }, p0/Z, [%x[table], #-4, MUL VL]\n"
-        "ld1b { z21.b }, p0/Z, [%x[table], #-3, MUL VL]\n"
-        "ld1b { z22.b }, p0/Z, [%x[table], #-2, MUL VL]\n"
-        "ld1b { z23.b }, p0/Z, [%x[table], #-1, MUL VL]\n"
-        "tbnz x25, #2, 1f\n"
-        "ld1b { z24.b }, p0/Z, [%x[table]]\n"
-        "ld1b { z25.b }, p0/Z, [%x[table], #1, MUL VL]\n"
-        "ld1b { z26.b }, p0/Z, [%x[table], #2, MUL VL]\n"
-        "ld1b { z27.b }, p0/Z, [%x[table], #3, MUL VL]\n"
-        "ld1b { z28.b }, p0/Z, [%x[table], #4, MUL VL]\n"
-        "ld1b { z29.b }, p0/Z, [%x[table], #5, MUL VL]\n"
-        "ld1b { z30.b }, p0/Z, [%x[table], #6, MUL VL]\n"
-        "ld1b { z31.b }, p0/Z, [%x[table], #7, MUL VL]\n"
-        "1:" // Table load done
-        "mov x24, #0x0\n"
-        "2:" // string loop
-        "ldr x23, [%x[input], x24, LSL #0x3]\n"
-        "ldr x22, [%x[output], x24, LSL #0x3]\n"
-        "tbnz x25, #5, 14f\n"
-        "tbnz x25, #4, 11f\n"
-        "tbnz x25, #3, 8f\n"
-        "tbnz x25, #2, 5f\n"
-        "mov z12.b, #0x10\n"
-        "mov x21, %x[string_length]\n"
-        "ptrue p5.b\n"
-        "ptrue p4.b\n"
-        "ptrue p3.b\n"
-        "ptrue p2.b\n"
-        "ptrue p1.b\n"
-        "ptrue p0.b\n"
-        "3:" // 16 rounds: width loop
-        "addvl x20, x21, #-6\n"
-        "cmp x20, XZR\n"
-        "bge 4f\n"
-        "mov x20, #0x0\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p5.b, XZR, x21\n"
-        "whilelt p4.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p3.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p2.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p1.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p0.b, x20, x21\n"
-        "4:" // 16 rounds: predicate OK
-        "ld1b { z11.b }, p5/Z, [x23]\n"
-        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
-        "tbl z9.b, { z16.b }, z11.b\n"
-        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
-        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
-        "sub z11.b, z11.b, z12.b\n"
-        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
-        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
-        "tbl z4.b, { z16.b }, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        "tbl z3.b, { z16.b }, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        "tbl z2.b, { z16.b }, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        "tbl z1.b, { z16.b }, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        "tbl z0.b, { z16.b }, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2f09 // tbx z9.b, z24.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2f04 // tbx z4.b, z24.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282f03 // tbx z3.b, z24.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272f02 // tbx z2.b, z24.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262f01 // tbx z1.b, z24.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252f00 // tbx z0.b, z24.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2f29 // tbx z9.b, z25.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2f24 // tbx z4.b, z25.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282f23 // tbx z3.b, z25.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272f22 // tbx z2.b, z25.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262f21 // tbx z1.b, z25.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252f20 // tbx z0.b, z25.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2f49 // tbx z9.b, z26.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2f44 // tbx z4.b, z26.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282f43 // tbx z3.b, z26.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272f42 // tbx z2.b, z26.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262f41 // tbx z1.b, z26.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252f40 // tbx z0.b, z26.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2f69 // tbx z9.b, z27.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2f64 // tbx z4.b, z27.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282f63 // tbx z3.b, z27.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272f62 // tbx z2.b, z27.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262f61 // tbx z1.b, z27.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252f60 // tbx z0.b, z27.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2f89 // tbx z9.b, z28.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2f84 // tbx z4.b, z28.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282f83 // tbx z3.b, z28.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272f82 // tbx z2.b, z28.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262f81 // tbx z1.b, z28.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252f80 // tbx z0.b, z28.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2fa9 // tbx z9.b, z29.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2fa4 // tbx z4.b, z29.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282fa3 // tbx z3.b, z29.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272fa2 // tbx z2.b, z29.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262fa1 // tbx z1.b, z29.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252fa0 // tbx z0.b, z29.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "addvl x21, x21, #-6\n"
-        ".inst 0x052b2fc9 // tbx z9.b, z30.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2fc4 // tbx z4.b, z30.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282fc3 // tbx z3.b, z30.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272fc2 // tbx z2.b, z30.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262fc1 // tbx z1.b, z30.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252fc0 // tbx z0.b, z30.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "cmp x21, XZR\n"
-        ".inst 0x052b2fe9 // tbx z9.b, z31.b, z11.b\n"
-        ".inst 0x052a2fe4 // tbx z4.b, z31.b, z10.b\n"
-        ".inst 0x05282fe3 // tbx z3.b, z31.b, z8.b\n"
-        "st1b { z9.b }, p5, [x22]\n"
-        ".inst 0x05272fe2 // tbx z2.b, z31.b, z7.b\n"
-        ".inst 0x05262fe1 // tbx z1.b, z31.b, z6.b\n"
-        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
-        ".inst 0x05252fe0 // tbx z0.b, z31.b, z5.b\n"
-        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
-        "addvl x23, x23, #6\n"
-        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
-        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
-        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
-        "addvl x22, x22, #6\n"
-        "bgt 3b\n"
-        "b 17f\n"
-        "5:" // 256 bits
-        "mov z12.b, #0x20\n"
-        "mov x21, %x[string_length]\n"
-        "ptrue p5.b\n"
-        "ptrue p4.b\n"
-        "ptrue p3.b\n"
-        "ptrue p2.b\n"
-        "ptrue p1.b\n"
-        "ptrue p0.b\n"
-        "6:" // 8 rounds: width loop
-        "addvl x20, x21, #-6\n"
-        "cmp x20, XZR\n"
-        "bge 7f\n"
-        "mov x20, #0x0\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p5.b, XZR, x21\n"
-        "whilelt p4.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p3.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p2.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p1.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p0.b, x20, x21\n"
-        "7:" // 8 rounds: predicate OK
-        "ld1b { z11.b }, p5/Z, [x23]\n"
-        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
-        "tbl z9.b, { z16.b }, z11.b\n"
-        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
-        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
-        "sub z11.b, z11.b, z12.b\n"
-        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
-        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
-        "tbl z4.b, { z16.b }, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        "tbl z3.b, { z16.b }, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        "tbl z2.b, { z16.b }, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        "tbl z1.b, { z16.b }, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        "tbl z0.b, { z16.b }, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "addvl x21, x21, #-6\n"
-        ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "cmp x21, XZR\n"
-        ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
-        ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
-        ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
-        "st1b { z9.b }, p5, [x22]\n"
-        ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
-        ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
-        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
-        ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
-        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
-        "addvl x23, x23, #6\n"
-        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
-        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
-        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
-        "addvl x22, x22, #6\n"
-        "bgt 6b\n"
-        "b 17f\n"
-        "8:" // 512 bits
-        "mov z12.b, #0x40\n"
-        "mov x21, %x[string_length]\n"
-        "ptrue p5.b\n"
-        "ptrue p4.b\n"
-        "ptrue p3.b\n"
-        "ptrue p2.b\n"
-        "ptrue p1.b\n"
-        "ptrue p0.b\n"
-        "9:" // 4 rounds: width loop
-        "addvl x20, x21, #-6\n"
-        "cmp x20, XZR\n"
-        "bge 10f\n"
-        "mov x20, #0x0\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p5.b, XZR, x21\n"
-        "whilelt p4.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p3.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p2.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p1.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p0.b, x20, x21\n"
-        "10:" // 4 rounds: predicate OK
-        "ld1b { z11.b }, p5/Z, [x23]\n"
-        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
-        "tbl z9.b, { z16.b }, z11.b\n"
-        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
-        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
-        "sub z11.b, z11.b, z12.b\n"
-        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
-        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
-        "tbl z4.b, { z16.b }, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        "tbl z3.b, { z16.b }, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        "tbl z2.b, { z16.b }, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        "tbl z1.b, { z16.b }, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        "tbl z0.b, { z16.b }, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "addvl x21, x21, #-6\n"
-        ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
-        "sub z11.b, z11.b, z12.b\n"
-        ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "cmp x21, XZR\n"
-        ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
-        ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
-        ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
-        "st1b { z9.b }, p5, [x22]\n"
-        ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
-        ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
-        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
-        ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
-        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
-        "addvl x23, x23, #6\n"
-        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
-        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
-        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
-        "addvl x22, x22, #6\n"
-        "bgt 9b\n"
-        "b 17f\n"
-        "11:" // 1024 bits
-        "mov z12.b, #0x80\n"
-        "mov x21, %x[string_length]\n"
-        "ptrue p5.b\n"
-        "ptrue p4.b\n"
-        "ptrue p3.b\n"
-        "ptrue p2.b\n"
-        "ptrue p1.b\n"
-        "ptrue p0.b\n"
-        "12:" // 2 rounds: width loop
-        "addvl x20, x21, #-6\n"
-        "cmp x20, XZR\n"
-        "bge 13f\n"
-        "mov x20, #0x0\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p5.b, XZR, x21\n"
-        "whilelt p4.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p3.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p2.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p1.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p0.b, x20, x21\n"
-        "13:" // 2 rounds: predicate OK
-        "ld1b { z11.b }, p5/Z, [x23]\n"
-        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
-        "addvl x21, x21, #-6\n"
-        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
-        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
-        "tbl z9.b, { z16.b }, z11.b\n"
-        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
-        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
-        "sub z11.b, z11.b, z12.b\n"
-        "tbl z4.b, { z16.b }, z10.b\n"
-        "sub z10.b, z10.b, z12.b\n"
-        "tbl z3.b, { z16.b }, z8.b\n"
-        "sub z8.b, z8.b, z12.b\n"
-        "tbl z2.b, { z16.b }, z7.b\n"
-        "sub z7.b, z7.b, z12.b\n"
-        "tbl z1.b, { z16.b }, z6.b\n"
-        "sub z6.b, z6.b, z12.b\n"
-        "tbl z0.b, { z16.b }, z5.b\n"
-        "sub z5.b, z5.b, z12.b\n"
-        "cmp x21, XZR\n"
-        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
-        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
-        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
-        "st1b { z9.b }, p5, [x22]\n"
-        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
-        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
-        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
-        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
-        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
-        "addvl x23, x23, #6\n"
-        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
-        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
-        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
-        "addvl x22, x22, #6\n"
-        "bgt 12b\n"
-        "b 17f\n"
-        "14:" // 2048 bits
-        "mov x21, %x[string_length]\n"
-        "ptrue p5.b\n"
-        "ptrue p4.b\n"
-        "ptrue p3.b\n"
-        "ptrue p2.b\n"
-        "ptrue p1.b\n"
-        "ptrue p0.b\n"
-        "15:" // 1 rounds: width loop
-        "addvl x20, x21, #-6\n"
-        "cmp x20, XZR\n"
-        "bge 16f\n"
-        "mov x20, #0x0\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p5.b, XZR, x21\n"
-        "whilelt p4.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p3.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p2.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p1.b, x20, x21\n"
-        "addvl x20, x20, #1\n"
-        "whilelt p0.b, x20, x21\n"
-        "16:" // 1 rounds: predicate OK
-        "addvl x21, x21, #-6\n"
-        "ld1b { z11.b }, p5/Z, [x23]\n"
-        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
-        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
-        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
-        "cmp x21, XZR\n"
-        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
-        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
-        "tbl z9.b, { z16.b }, z11.b\n"
-        "tbl z4.b, { z16.b }, z10.b\n"
-        "tbl z3.b, { z16.b }, z8.b\n"
-        "st1b { z9.b }, p5, [x22]\n"
-        "tbl z2.b, { z16.b }, z7.b\n"
-        "tbl z1.b, { z16.b }, z6.b\n"
-        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
-        "tbl z0.b, { z16.b }, z5.b\n"
-        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
-        "addvl x23, x23, #6\n"
-        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
-        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
-        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
-        "addvl x22, x22, #6\n"
-        "bgt 15b\n"
-        "17:" // SVE body done
-        "add x24, x24, #0x1\n"
-        "cmp x24, %x[num_strings]\n"
-        "bne 2b\n"
-        : [table] "+&r"(table)
-        : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length)
-        : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31");
-}
-#endif // __aarch64__
-} // namespace
-
 #ifdef __aarch64__
 void sve_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
 {
@@ -657,7 +42,7 @@
     {
         const auto input_ptr  = input.ptr();
         auto       output_ptr = output.ptr();
-        substitute_bytes_sve(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
+        lut_u8_sve(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
     },
     input, output);
 }
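
The SVE variant, now shared as lut_u8_sve, must stay vector-length agnostic: the table is split into VL-byte segments, the first segment is looked up with TBL (out-of-range lanes become 0) and each later segment with TBX (out-of-range lanes keep their previous value), subtracting VL from the index between steps. A scalar model of that chaining, with vl standing in for the SVE vector length in bytes:

```cpp
#include <cstddef>
#include <cstdint>

// Reference semantics only; the real kernel does this per lane in z registers.
uint8_t lookup_chained(const uint8_t table[256], uint8_t idx, size_t vl)
{
    uint8_t result = 0;                                               // TBL zeroes out-of-range lanes
    for(size_t seg = 0; seg * vl < 256; ++seg)
    {
        const uint8_t rebased = static_cast<uint8_t>(idx - seg * vl); // byte SUB wraps mod 256
        if(rebased < vl)                                              // this lane hits segment 'seg'
        {
            result = table[seg * vl + rebased];                       // TBX overwrites in-range lanes only
        }
    }
    return result;
}
```

On a 2048-bit machine the whole table fits in one register and the lookup degenerates to a single TBL, which is why the assembly above branches on the vector length up front.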
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp
index 976d006..b2833c2 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,8 +29,9 @@
 {
 namespace cpu
 {
-void neon_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void neon_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 {
+    ARM_COMPUTE_UNUSED(lut);
     return elementwise_op<__fp16>(in, out, window, op);
 }
 }
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
index 21f4d9d..6566821 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -28,8 +28,9 @@
 {
 namespace cpu
 {
-void neon_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void neon_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 {
+    ARM_COMPUTE_UNUSED(lut);
     return elementwise_op<float>(in, out, window, op);
 }
 }
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp
index ef3120e..dfe5e30 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -28,8 +28,9 @@
 {
 namespace cpu
 {
-void neon_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void neon_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 {
+    ARM_COMPUTE_UNUSED(lut);
     return elementwise_op<int32_t>(in, out, window, op);
 }
 }
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp
new file mode 100644
index 0000000..08bb7f2
--- /dev/null
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+
+void neon_q8_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
+{
+    ARM_COMPUTE_UNUSED(op);
+
+    auto win = window;
+    const auto window_end_x = window.x().end();
+    win.set(0, Window::Dimension(0, 1, 1));
+
+    Iterator src_it(in, win);
+    Iterator dst_it(out, win);
+
+    execute_window_loop(win, [&](const Coordinates &) {
+        const auto src_ptr = src_it.ptr();
+        auto dst_ptr = dst_it.ptr();
+
+        lut_u8_neon(lut, 1, window_end_x, &src_ptr, &dst_ptr);
+    },
+    src_it, dst_it);
+}
+
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
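
Note that the micro-kernel ignores op entirely: the operator is already baked into the table at configure time. Collapsing the window's x dimension lets each lut_u8_neon call consume one contiguous row; semantically the call above is just:

```cpp
// Scalar equivalent of one lut_u8_neon invocation:
for(int x = 0; x < window_end_x; ++x)
{
    dst_ptr[x] = lut[src_ptr[x]];
}
```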
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp
index ba29b3d..01567a7 100644
--- a/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,10 +29,11 @@
 {
 namespace cpu
 {
-void sve_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void sve_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 {
+    ARM_COMPUTE_UNUSED(lut);
     return elementwise_sve_op<float16_t>(in, out, window, op);
 }
 }
 } // namespace arm_compute
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
\ No newline at end of file
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
index c5222c5..47645ff 100644
--- a/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,8 +29,9 @@
 {
 namespace cpu
 {
-void sve_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void sve_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 {
+    ARM_COMPUTE_UNUSED(lut);
     return elementwise_sve_op<float32_t>(in, out, window, op);
 }
 }
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp
index 984056a..068c3f7 100644
--- a/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,8 +29,9 @@
 {
 namespace cpu
 {
-void sve_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void sve_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 {
+    ARM_COMPUTE_UNUSED(lut); // Integer kernels compute directly; no LUT is used.
     return elementwise_sve_op<int32_t>(in, out, window, op);
 }
 }
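
The lut parameter is threaded through every elementwise-unary kernel, quantized or not, so that all data types keep a single common function signature (see the DECLARE_ELEMETWISE_UNARY_KERNEL macro below); the float and integer kernels compute their results directly and simply mark the table as unused.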
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp
new file mode 100644
index 0000000..b68f691
--- /dev/null
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+void sve_q8_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
+{
+    ARM_COMPUTE_UNUSED(op); // As in the NEON variant, the operation lives in the LUT.
+
+    auto win = window;
+    const auto window_end_x = window.x().end();
+    win.set(0, Window::Dimension(0, 1, 1)); // Collapse X so each row is one LUT call.
+
+    Iterator src_it(in, win);
+    Iterator dst_it(out, win);
+
+    execute_window_loop(win, [&](const Coordinates &) {
+        const auto src_ptr = src_it.ptr();
+        auto dst_ptr = dst_it.ptr();
+
+        lut_u8_sve(lut, 1, window_end_x, &src_ptr, &dst_ptr);
+    },
+    src_it, dst_it);
+}
+
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/elementwise_unary/list.h b/src/cpu/kernels/elementwise_unary/list.h
index 2a41b74..04c3bb6 100644
--- a/src/cpu/kernels/elementwise_unary/list.h
+++ b/src/cpu/kernels/elementwise_unary/list.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,17 +32,19 @@
 namespace cpu
 {
 #define DECLARE_ELEMETWISE_UNARY_KERNEL(func_name) \
-    void func_name(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+    void func_name(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
 
 DECLARE_ELEMETWISE_UNARY_KERNEL(sve_fp32_elementwise_unary);
 DECLARE_ELEMETWISE_UNARY_KERNEL(sve_fp16_elementwise_unary);
 DECLARE_ELEMETWISE_UNARY_KERNEL(sve_s32_elementwise_unary);
+DECLARE_ELEMETWISE_UNARY_KERNEL(sve_q8_elementwise_unary);
 DECLARE_ELEMETWISE_UNARY_KERNEL(neon_fp32_elementwise_unary);
 DECLARE_ELEMETWISE_UNARY_KERNEL(neon_fp16_elementwise_unary);
 DECLARE_ELEMETWISE_UNARY_KERNEL(neon_s32_elementwise_unary);
+DECLARE_ELEMETWISE_UNARY_KERNEL(neon_q8_elementwise_unary);
 
 #undef DECLARE_ELEMETWISE_UNARY_KERNEL
 
 } // namespace cpu
 } // namespace arm_compute
-#endif // SRC_CORE_KERNELS_ELEMETWISE_UNARY_LIST_H
\ No newline at end of file
+#endif // SRC_CORE_KERNELS_ELEMETWISE_UNARY_LIST_H
diff --git a/src/cpu/kernels/lut/generic/neon/u8.cpp b/src/cpu/kernels/lut/generic/neon/u8.cpp
new file mode 100644
index 0000000..8ab647b
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/neon/u8.cpp
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+
+void lut_u8_neon(
+    const uint8_t        *table,
+    size_t                num_strings,
+    size_t                string_length,
+    const uint8_t *const *input,
+    uint8_t *const       *output)
+{
+    __asm__ __volatile__(
+        "ldr q16, [%x[table], #0x0]\n"
+        "ldr q17, [%x[table], #0x10]\n"
+        "mov x23, #0x0\n"
+        "ldr q18, [%x[table], #0x20]\n"
+        "ldr q19, [%x[table], #0x30]\n"
+        "ldr q20, [%x[table], #0x40]\n"
+        "ldr q21, [%x[table], #0x50]\n"
+        "ldr q22, [%x[table], #0x60]\n"
+        "ldr q23, [%x[table], #0x70]\n"
+        "ldr q24, [%x[table], #0x80]\n"
+        "ldr q25, [%x[table], #0x90]\n"
+        "ldr q26, [%x[table], #0xa0]\n"
+        "ldr q27, [%x[table], #0xb0]\n"
+        "ldr q28, [%x[table], #0xc0]\n"
+        "ldr q29, [%x[table], #0xd0]\n"
+        "ldr q30, [%x[table], #0xe0]\n"
+        "ldr q31, [%x[table], #0xf0]\n"
+        "1:" // string loop
+        "ldr x22, [%x[input], x23, LSL #0x3]\n"
+        "ldr x21, [%x[output], x23, LSL #0x3]\n"
+        "movi v11.16b, #0x40\n"
+        "movi v10.16b, #0x80\n"
+        "movi v9.16b, #0xc0\n"
+        "mov x20, %x[string_length]\n"
+        "2:" // 4 rounds: width loop
+        "cmp x20, #0x30\n"
+        "bge 27f\n"
+        "tbz x20, #5, 10f\n"
+        "ld1 { v8.16b }, [x22], #0x10\n"
+        "ld1 { v13.16b }, [x22], #0x10\n"
+        "tbz x20, #3, 6f\n"
+        "ldr d12, [x22], #0x8\n"
+        "tbz x20, #2, 4f\n"
+        "ld1 { v12.s }[2], [x22], #0x4\n"
+        "tbz x20, #1, 3f\n"
+        "ld1 { v12.h }[6], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[14], [x22]\n"
+        "b 26f\n"
+        "3:" // 4 rounds: Partial load: partial_1_44
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[12], [x22]\n"
+        "b 26f\n"
+        "4:" // 4 rounds: Partial load: partial_2_40
+        "tbz x20, #1, 5f\n"
+        "ld1 { v12.h }[4], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[10], [x22]\n"
+        "b 26f\n"
+        "5:" // 4 rounds: Partial load: partial_1_40
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[8], [x22]\n"
+        "b 26f\n"
+        "6:" // 4 rounds: Partial load: partial_4_32
+        "tbz x20, #2, 8f\n"
+        "ldr s12, [x22], #0x4\n"
+        "tbz x20, #1, 7f\n"
+        "ld1 { v12.h }[2], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[6], [x22]\n"
+        "b 26f\n"
+        "7:" // 4 rounds: Partial load: partial_1_36
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[4], [x22]\n"
+        "b 26f\n"
+        "8:" // 4 rounds: Partial load: partial_2_32
+        "tbz x20, #1, 9f\n"
+        "ldr h12, [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v12.b }[2], [x22]\n"
+        "b 26f\n"
+        "9:" // 4 rounds: Partial load: partial_1_32
+        "tbz x20, #0, 26f\n"
+        "ldr b12, [x22, #0x0]\n"
+        "b 26f\n"
+        "10:" // 4 rounds: Partial load: partial_16_0
+        "tbz x20, #4, 18f\n"
+        "ld1 { v8.16b }, [x22], #0x10\n"
+        "tbz x20, #3, 14f\n"
+        "ldr d13, [x22], #0x8\n"
+        "tbz x20, #2, 12f\n"
+        "ld1 { v13.s }[2], [x22], #0x4\n"
+        "tbz x20, #1, 11f\n"
+        "ld1 { v13.h }[6], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[14], [x22]\n"
+        "b 26f\n"
+        "11:" // 4 rounds: Partial load: partial_1_28
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[12], [x22]\n"
+        "b 26f\n"
+        "12:" // 4 rounds: Partial load: partial_2_24
+        "tbz x20, #1, 13f\n"
+        "ld1 { v13.h }[4], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[10], [x22]\n"
+        "b 26f\n"
+        "13:" // 4 rounds: Partial load: partial_1_24
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[8], [x22]\n"
+        "b 26f\n"
+        "14:" // 4 rounds: Partial load: partial_4_16
+        "tbz x20, #2, 16f\n"
+        "ldr s13, [x22], #0x4\n"
+        "tbz x20, #1, 15f\n"
+        "ld1 { v13.h }[2], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[6], [x22]\n"
+        "b 26f\n"
+        "15:" // 4 rounds: Partial load: partial_1_20
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[4], [x22]\n"
+        "b 26f\n"
+        "16:" // 4 rounds: Partial load: partial_2_16
+        "tbz x20, #1, 17f\n"
+        "ldr h13, [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v13.b }[2], [x22]\n"
+        "b 26f\n"
+        "17:" // 4 rounds: Partial load: partial_1_16
+        "tbz x20, #0, 26f\n"
+        "ldr b13, [x22, #0x0]\n"
+        "b 26f\n"
+        "18:" // 4 rounds: Partial load: partial_8_0
+        "tbz x20, #3, 22f\n"
+        "ldr d8, [x22], #0x8\n"
+        "tbz x20, #2, 20f\n"
+        "ld1 { v8.s }[2], [x22], #0x4\n"
+        "tbz x20, #1, 19f\n"
+        "ld1 { v8.h }[6], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[14], [x22]\n"
+        "b 26f\n"
+        "19:" // 4 rounds: Partial load: partial_1_12
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[12], [x22]\n"
+        "b 26f\n"
+        "20:" // 4 rounds: Partial load: partial_2_8
+        "tbz x20, #1, 21f\n"
+        "ld1 { v8.h }[4], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[10], [x22]\n"
+        "b 26f\n"
+        "21:" // 4 rounds: Partial load: partial_1_8
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[8], [x22]\n"
+        "b 26f\n"
+        "22:" // 4 rounds: Partial load: partial_4_0
+        "tbz x20, #2, 24f\n"
+        "ldr s8, [x22], #0x4\n"
+        "tbz x20, #1, 23f\n"
+        "ld1 { v8.h }[2], [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[6], [x22]\n"
+        "b 26f\n"
+        "23:" // 4 rounds: Partial load: partial_1_4
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[4], [x22]\n"
+        "b 26f\n"
+        "24:" // 4 rounds: Partial load: partial_2_0
+        "tbz x20, #1, 25f\n"
+        "ldr h8, [x22], #0x2\n"
+        "tbz x20, #0, 26f\n"
+        "ld1 { v8.b }[2], [x22]\n"
+        "b 26f\n"
+        "25:" // 4 rounds: Partial load: partial_1_0
+        "ldr b8, [x22, #0x0]\n"
+        "26:" // 4 rounds: Partial load: Done
+        "b 28f\n"
+        "27:" // 4 rounds: Full load
+        "ldr q8, [x22, #0x0]\n"
+        "ldr q13, [x22, #0x10]\n"
+        "ldr q12, [x22, #0x20]\n"
+        "add x22, x22, #0x30\n"
+        "28:" // 4 rounds: Load done
+        "sub v0.16b, v8.16b, v11.16b\n"
+        "sub v7.16b, v8.16b, v10.16b\n"
+        "tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b\n"
+        "sub v6.16b, v8.16b, v9.16b\n"
+        "sub v5.16b, v13.16b, v11.16b\n"
+        "tbl v8.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v8.16b\n"
+        "sub v4.16b, v13.16b, v10.16b\n"
+        "sub v3.16b, v13.16b, v9.16b\n"
+        "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
+        "sub v2.16b, v12.16b, v11.16b\n"
+        "sub v1.16b, v12.16b, v10.16b\n"
+        "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
+        "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
+        "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
+        "orr v8.16b, v8.16b, v0.16b\n"
+        "sub v0.16b, v12.16b, v9.16b\n"
+        "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
+        "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
+        "tbl v12.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v12.16b\n"
+        "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
+        "orr v7.16b, v7.16b, v6.16b\n"
+        "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
+        "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
+        "orr v13.16b, v13.16b, v5.16b\n"
+        "orr v4.16b, v4.16b, v3.16b\n"
+        "orr v12.16b, v12.16b, v2.16b\n"
+        "cmp x20, #0x30\n"
+        "orr v1.16b, v1.16b, v0.16b\n"
+        "orr v8.16b, v8.16b, v7.16b\n"
+        "orr v13.16b, v13.16b, v4.16b\n"
+        "orr v12.16b, v12.16b, v1.16b\n"
+        "bge 53f\n"
+        "tbz x20, #5, 36f\n"
+        "st1 { v8.16b }, [x21], #0x10\n"
+        "st1 { v13.16b }, [x21], #0x10\n"
+        "tbz x20, #3, 32f\n"
+        "str d12, [x21], #0x8\n"
+        "tbz x20, #2, 30f\n"
+        "st1 { v12.s }[2], [x21], #0x4\n"
+        "tbz x20, #1, 29f\n"
+        "st1 { v12.h }[6], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[14], [x21]\n"
+        "b 52f\n"
+        "29:" // 4 rounds: Partial writeback: partial_1_44
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[12], [x21]\n"
+        "b 52f\n"
+        "30:" // 4 rounds: Partial writeback: partial_2_40
+        "tbz x20, #1, 31f\n"
+        "st1 { v12.h }[4], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[10], [x21]\n"
+        "b 52f\n"
+        "31:" // 4 rounds: Partial writeback: partial_1_40
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[8], [x21]\n"
+        "b 52f\n"
+        "32:" // 4 rounds: Partial writeback: partial_4_32
+        "tbz x20, #2, 34f\n"
+        "str s12, [x21], #0x4\n"
+        "tbz x20, #1, 33f\n"
+        "st1 { v12.h }[2], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[6], [x21]\n"
+        "b 52f\n"
+        "33:" // 4 rounds: Partial writeback: partial_1_36
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[4], [x21]\n"
+        "b 52f\n"
+        "34:" // 4 rounds: Partial writeback: partial_2_32
+        "tbz x20, #1, 35f\n"
+        "str h12, [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v12.b }[2], [x21]\n"
+        "b 52f\n"
+        "35:" // 4 rounds: Partial writeback: partial_1_32
+        "tbz x20, #0, 52f\n"
+        "str b12, [x21, #0x0]\n"
+        "b 52f\n"
+        "36:" // 4 rounds: Partial writeback: partial_16_0
+        "tbz x20, #4, 44f\n"
+        "st1 { v8.16b }, [x21], #0x10\n"
+        "tbz x20, #3, 40f\n"
+        "str d13, [x21], #0x8\n"
+        "tbz x20, #2, 38f\n"
+        "st1 { v13.s }[2], [x21], #0x4\n"
+        "tbz x20, #1, 37f\n"
+        "st1 { v13.h }[6], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[14], [x21]\n"
+        "b 52f\n"
+        "37:" // 4 rounds: Partial writeback: partial_1_28
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[12], [x21]\n"
+        "b 52f\n"
+        "38:" // 4 rounds: Partial writeback: partial_2_24
+        "tbz x20, #1, 39f\n"
+        "st1 { v13.h }[4], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[10], [x21]\n"
+        "b 52f\n"
+        "39:" // 4 rounds: Partial writeback: partial_1_24
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[8], [x21]\n"
+        "b 52f\n"
+        "40:" // 4 rounds: Partial writeback: partial_4_16
+        "tbz x20, #2, 42f\n"
+        "str s13, [x21], #0x4\n"
+        "tbz x20, #1, 41f\n"
+        "st1 { v13.h }[2], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[6], [x21]\n"
+        "b 52f\n"
+        "41:" // 4 rounds: Partial writeback: partial_1_20
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[4], [x21]\n"
+        "b 52f\n"
+        "42:" // 4 rounds: Partial writeback: partial_2_16
+        "tbz x20, #1, 43f\n"
+        "str h13, [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v13.b }[2], [x21]\n"
+        "b 52f\n"
+        "43:" // 4 rounds: Partial writeback: partial_1_16
+        "tbz x20, #0, 52f\n"
+        "str b13, [x21, #0x0]\n"
+        "b 52f\n"
+        "44:" // 4 rounds: Partial writeback: partial_8_0
+        "tbz x20, #3, 48f\n"
+        "str d8, [x21], #0x8\n"
+        "tbz x20, #2, 46f\n"
+        "st1 { v8.s }[2], [x21], #0x4\n"
+        "tbz x20, #1, 45f\n"
+        "st1 { v8.h }[6], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[14], [x21]\n"
+        "b 52f\n"
+        "45:" // 4 rounds: Partial writeback: partial_1_12
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[12], [x21]\n"
+        "b 52f\n"
+        "46:" // 4 rounds: Partial writeback: partial_2_8
+        "tbz x20, #1, 47f\n"
+        "st1 { v8.h }[4], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[10], [x21]\n"
+        "b 52f\n"
+        "47:" // 4 rounds: Partial writeback: partial_1_8
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[8], [x21]\n"
+        "b 52f\n"
+        "48:" // 4 rounds: Partial writeback: partial_4_0
+        "tbz x20, #2, 50f\n"
+        "str s8, [x21], #0x4\n"
+        "tbz x20, #1, 49f\n"
+        "st1 { v8.h }[2], [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[6], [x21]\n"
+        "b 52f\n"
+        "49:" // 4 rounds: Partial writeback: partial_1_4
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[4], [x21]\n"
+        "b 52f\n"
+        "50:" // 4 rounds: Partial writeback: partial_2_0
+        "tbz x20, #1, 51f\n"
+        "str h8, [x21], #0x2\n"
+        "tbz x20, #0, 52f\n"
+        "st1 { v8.b }[2], [x21]\n"
+        "b 52f\n"
+        "51:" // 4 rounds: Partial writeback: partial_1_0
+        "str b8, [x21, #0x0]\n"
+        "52:" // 4 rounds: Partial writeback: Done
+        "b 54f\n"
+        "53:" // 4 rounds: Full writeback
+        "str q8, [x21, #0x0]\n"
+        "str q13, [x21, #0x10]\n"
+        "str q12, [x21, #0x20]\n"
+        "add x21, x21, #0x30\n"
+        "54:" // 4 rounds: Writeback done
+        "subs x20, x20, #0x30\n"
+        "bgt 2b\n"
+        "add x23, x23, #0x1\n"
+        "cmp x23, %x[num_strings]\n"
+        "bne 1b\n"
+        :
+        : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
+        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23");
+}
+
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
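
A note on the NEON implementation above: a four-register TBL can index at most 64 table bytes, and any lane whose index is out of range reads as zero. The kernel therefore keeps the 256-byte table in v16-v31 and performs four lookup rounds per input vector, re-basing the indices by 0x40, 0x80 and 0xC0 (the constants in v11, v10 and v9) so each round covers one 64-byte quarter of the table; exactly one round yields a non-zero byte per lane, so the partial results are merged with ORR. Three 16-byte vectors (0x30 bytes) are processed per loop iteration, with the tbz ladders handling partial loads and stores at the tail. A hedged intrinsics sketch of a single 16-byte lookup, assuming the same table layout (this helper is illustrative, not the generated code):

    #include <arm_neon.h>

    // tab0..tab3 hold the 256-byte table split into four 64-byte quarters.
    static inline uint8x16_t lut256_tbl4(uint8x16x4_t tab0, uint8x16x4_t tab1,
                                         uint8x16x4_t tab2, uint8x16x4_t tab3,
                                         uint8x16_t idx)
    {
        // Round 0: indices 0..63 hit; out-of-range lanes read as zero.
        uint8x16_t r0 = vqtbl4q_u8(tab0, idx);
        // Rounds 1..3: re-base so 64..127, 128..191 and 192..255 fall in range.
        // The wrapping subtraction pushes all other lanes out of range (>= 64).
        uint8x16_t r1 = vqtbl4q_u8(tab1, vsubq_u8(idx, vdupq_n_u8(0x40)));
        uint8x16_t r2 = vqtbl4q_u8(tab2, vsubq_u8(idx, vdupq_n_u8(0x80)));
        uint8x16_t r3 = vqtbl4q_u8(tab3, vsubq_u8(idx, vdupq_n_u8(0xc0)));
        // Exactly one round is non-zero per lane, so ORR merges them.
        return vorrq_u8(vorrq_u8(r0, r1), vorrq_u8(r2, r3));
    }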
diff --git a/src/cpu/kernels/lut/generic/sve/u8.cpp b/src/cpu/kernels/lut/generic/sve/u8.cpp
new file mode 100644
index 0000000..70f3a2e
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/sve/u8.cpp
@@ -0,0 +1,647 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/lut/list.h"
+
+#ifdef __aarch64__
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+void lut_u8_sve(
+    const uint8_t        *table,
+    size_t                num_strings,
+    size_t                string_length,
+    const uint8_t *const *input,
+    uint8_t *const       *output)
+{
+    __asm__ __volatile__(
+        "ptrue p0.b\n"
+        "cntd x25\n"
+        "addvl %x[table], %x[table], #8\n"
+        "ld1b { z16.b }, p0/Z, [%x[table], #-8, MUL VL]\n"
+        "tbnz x25, #5, 1f\n"
+        "ld1b { z17.b }, p0/Z, [%x[table], #-7, MUL VL]\n"
+        "tbnz x25, #4, 1f\n"
+        "ld1b { z18.b }, p0/Z, [%x[table], #-6, MUL VL]\n"
+        "ld1b { z19.b }, p0/Z, [%x[table], #-5, MUL VL]\n"
+        "tbnz x25, #3, 1f\n"
+        "ld1b { z20.b }, p0/Z, [%x[table], #-4, MUL VL]\n"
+        "ld1b { z21.b }, p0/Z, [%x[table], #-3, MUL VL]\n"
+        "ld1b { z22.b }, p0/Z, [%x[table], #-2, MUL VL]\n"
+        "ld1b { z23.b }, p0/Z, [%x[table], #-1, MUL VL]\n"
+        "tbnz x25, #2, 1f\n"
+        "ld1b { z24.b }, p0/Z, [%x[table]]\n"
+        "ld1b { z25.b }, p0/Z, [%x[table], #1, MUL VL]\n"
+        "ld1b { z26.b }, p0/Z, [%x[table], #2, MUL VL]\n"
+        "ld1b { z27.b }, p0/Z, [%x[table], #3, MUL VL]\n"
+        "ld1b { z28.b }, p0/Z, [%x[table], #4, MUL VL]\n"
+        "ld1b { z29.b }, p0/Z, [%x[table], #5, MUL VL]\n"
+        "ld1b { z30.b }, p0/Z, [%x[table], #6, MUL VL]\n"
+        "ld1b { z31.b }, p0/Z, [%x[table], #7, MUL VL]\n"
+        "1:" // Table load done
+        "mov x24, #0x0\n"
+        "2:" // string loop
+        "ldr x23, [%x[input], x24, LSL #0x3]\n"
+        "ldr x22, [%x[output], x24, LSL #0x3]\n"
+        "tbnz x25, #5, 14f\n"
+        "tbnz x25, #4, 11f\n"
+        "tbnz x25, #3, 8f\n"
+        "tbnz x25, #2, 5f\n"
+        "mov z12.b, #0x10\n"
+        "mov x21, %x[string_length]\n"
+        "ptrue p5.b\n"
+        "ptrue p4.b\n"
+        "ptrue p3.b\n"
+        "ptrue p2.b\n"
+        "ptrue p1.b\n"
+        "ptrue p0.b\n"
+        "3:" // 16 rounds: width loop
+        "addvl x20, x21, #-6\n"
+        "cmp x20, XZR\n"
+        "bge 4f\n"
+        "mov x20, #0x0\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p5.b, XZR, x21\n"
+        "whilelt p4.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p3.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p2.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p1.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p0.b, x20, x21\n"
+        "4:" // 16 rounds: predicate OK
+        "ld1b { z11.b }, p5/Z, [x23]\n"
+        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+        "tbl z9.b, { z16.b }, z11.b\n"
+        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+        "sub z11.b, z11.b, z12.b\n"
+        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+        "tbl z4.b, { z16.b }, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        "tbl z3.b, { z16.b }, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        "tbl z2.b, { z16.b }, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        "tbl z1.b, { z16.b }, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        "tbl z0.b, { z16.b }, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2f09 // tbx z9.b, z24.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2f04 // tbx z4.b, z24.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282f03 // tbx z3.b, z24.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272f02 // tbx z2.b, z24.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262f01 // tbx z1.b, z24.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252f00 // tbx z0.b, z24.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2f29 // tbx z9.b, z25.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2f24 // tbx z4.b, z25.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282f23 // tbx z3.b, z25.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272f22 // tbx z2.b, z25.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262f21 // tbx z1.b, z25.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252f20 // tbx z0.b, z25.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2f49 // tbx z9.b, z26.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2f44 // tbx z4.b, z26.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282f43 // tbx z3.b, z26.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272f42 // tbx z2.b, z26.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262f41 // tbx z1.b, z26.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252f40 // tbx z0.b, z26.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2f69 // tbx z9.b, z27.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2f64 // tbx z4.b, z27.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282f63 // tbx z3.b, z27.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272f62 // tbx z2.b, z27.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262f61 // tbx z1.b, z27.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252f60 // tbx z0.b, z27.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2f89 // tbx z9.b, z28.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2f84 // tbx z4.b, z28.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282f83 // tbx z3.b, z28.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272f82 // tbx z2.b, z28.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262f81 // tbx z1.b, z28.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252f80 // tbx z0.b, z28.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2fa9 // tbx z9.b, z29.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2fa4 // tbx z4.b, z29.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282fa3 // tbx z3.b, z29.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272fa2 // tbx z2.b, z29.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262fa1 // tbx z1.b, z29.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252fa0 // tbx z0.b, z29.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "addvl x21, x21, #-6\n"
+        ".inst 0x052b2fc9 // tbx z9.b, z30.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2fc4 // tbx z4.b, z30.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282fc3 // tbx z3.b, z30.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272fc2 // tbx z2.b, z30.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262fc1 // tbx z1.b, z30.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252fc0 // tbx z0.b, z30.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "cmp x21, XZR\n"
+        ".inst 0x052b2fe9 // tbx z9.b, z31.b, z11.b\n"
+        ".inst 0x052a2fe4 // tbx z4.b, z31.b, z10.b\n"
+        ".inst 0x05282fe3 // tbx z3.b, z31.b, z8.b\n"
+        "st1b { z9.b }, p5, [x22]\n"
+        ".inst 0x05272fe2 // tbx z2.b, z31.b, z7.b\n"
+        ".inst 0x05262fe1 // tbx z1.b, z31.b, z6.b\n"
+        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+        ".inst 0x05252fe0 // tbx z0.b, z31.b, z5.b\n"
+        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+        "addvl x23, x23, #6\n"
+        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+        "addvl x22, x22, #6\n"
+        "bgt 3b\n"
+        "b 17f\n"
+        "5:" // 256 bits
+        "mov z12.b, #0x20\n"
+        "mov x21, %x[string_length]\n"
+        "ptrue p5.b\n"
+        "ptrue p4.b\n"
+        "ptrue p3.b\n"
+        "ptrue p2.b\n"
+        "ptrue p1.b\n"
+        "ptrue p0.b\n"
+        "6:" // 8 rounds: width loop
+        "addvl x20, x21, #-6\n"
+        "cmp x20, XZR\n"
+        "bge 7f\n"
+        "mov x20, #0x0\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p5.b, XZR, x21\n"
+        "whilelt p4.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p3.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p2.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p1.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p0.b, x20, x21\n"
+        "7:" // 8 rounds: predicate OK
+        "ld1b { z11.b }, p5/Z, [x23]\n"
+        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+        "tbl z9.b, { z16.b }, z11.b\n"
+        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+        "sub z11.b, z11.b, z12.b\n"
+        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+        "tbl z4.b, { z16.b }, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        "tbl z3.b, { z16.b }, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        "tbl z2.b, { z16.b }, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        "tbl z1.b, { z16.b }, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        "tbl z0.b, { z16.b }, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "addvl x21, x21, #-6\n"
+        ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "cmp x21, XZR\n"
+        ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
+        ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
+        ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
+        "st1b { z9.b }, p5, [x22]\n"
+        ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
+        ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
+        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+        ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
+        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+        "addvl x23, x23, #6\n"
+        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+        "addvl x22, x22, #6\n"
+        "bgt 6b\n"
+        "b 17f\n"
+        "8:" // 512 bits
+        "mov z12.b, #0x40\n"
+        "mov x21, %x[string_length]\n"
+        "ptrue p5.b\n"
+        "ptrue p4.b\n"
+        "ptrue p3.b\n"
+        "ptrue p2.b\n"
+        "ptrue p1.b\n"
+        "ptrue p0.b\n"
+        "9:" // 4 rounds: width loop
+        "addvl x20, x21, #-6\n"
+        "cmp x20, XZR\n"
+        "bge 10f\n"
+        "mov x20, #0x0\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p5.b, XZR, x21\n"
+        "whilelt p4.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p3.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p2.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p1.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p0.b, x20, x21\n"
+        "10:" // 4 rounds: predicate OK
+        "ld1b { z11.b }, p5/Z, [x23]\n"
+        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+        "tbl z9.b, { z16.b }, z11.b\n"
+        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+        "sub z11.b, z11.b, z12.b\n"
+        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+        "tbl z4.b, { z16.b }, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        "tbl z3.b, { z16.b }, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        "tbl z2.b, { z16.b }, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        "tbl z1.b, { z16.b }, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        "tbl z0.b, { z16.b }, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "addvl x21, x21, #-6\n"
+        ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+        "sub z11.b, z11.b, z12.b\n"
+        ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "cmp x21, XZR\n"
+        ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+        ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+        ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+        "st1b { z9.b }, p5, [x22]\n"
+        ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+        ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+        ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+        "addvl x23, x23, #6\n"
+        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+        "addvl x22, x22, #6\n"
+        "bgt 9b\n"
+        "b 17f\n"
+        "11:" // 1024 bits
+        "mov z12.b, #0x80\n"
+        "mov x21, %x[string_length]\n"
+        "ptrue p5.b\n"
+        "ptrue p4.b\n"
+        "ptrue p3.b\n"
+        "ptrue p2.b\n"
+        "ptrue p1.b\n"
+        "ptrue p0.b\n"
+        "12:" // 2 rounds: width loop
+        "addvl x20, x21, #-6\n"
+        "cmp x20, XZR\n"
+        "bge 13f\n"
+        "mov x20, #0x0\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p5.b, XZR, x21\n"
+        "whilelt p4.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p3.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p2.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p1.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p0.b, x20, x21\n"
+        "13:" // 2 rounds: predicate OK
+        "ld1b { z11.b }, p5/Z, [x23]\n"
+        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+        "addvl x21, x21, #-6\n"
+        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+        "tbl z9.b, { z16.b }, z11.b\n"
+        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+        "sub z11.b, z11.b, z12.b\n"
+        "tbl z4.b, { z16.b }, z10.b\n"
+        "sub z10.b, z10.b, z12.b\n"
+        "tbl z3.b, { z16.b }, z8.b\n"
+        "sub z8.b, z8.b, z12.b\n"
+        "tbl z2.b, { z16.b }, z7.b\n"
+        "sub z7.b, z7.b, z12.b\n"
+        "tbl z1.b, { z16.b }, z6.b\n"
+        "sub z6.b, z6.b, z12.b\n"
+        "tbl z0.b, { z16.b }, z5.b\n"
+        "sub z5.b, z5.b, z12.b\n"
+        "cmp x21, XZR\n"
+        ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+        ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+        ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+        "st1b { z9.b }, p5, [x22]\n"
+        ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+        ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+        ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+        "addvl x23, x23, #6\n"
+        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+        "addvl x22, x22, #6\n"
+        "bgt 12b\n"
+        "b 17f\n"
+        "14:" // 2048 bits
+        "mov x21, %x[string_length]\n"
+        "ptrue p5.b\n"
+        "ptrue p4.b\n"
+        "ptrue p3.b\n"
+        "ptrue p2.b\n"
+        "ptrue p1.b\n"
+        "ptrue p0.b\n"
+        "15:" // 1 rounds: width loop
+        "addvl x20, x21, #-6\n"
+        "cmp x20, XZR\n"
+        "bge 16f\n"
+        "mov x20, #0x0\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p5.b, XZR, x21\n"
+        "whilelt p4.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p3.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p2.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p1.b, x20, x21\n"
+        "addvl x20, x20, #1\n"
+        "whilelt p0.b, x20, x21\n"
+        "16:" // 1 rounds: predicate OK
+        "addvl x21, x21, #-6\n"
+        "ld1b { z11.b }, p5/Z, [x23]\n"
+        "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+        "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+        "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+        "cmp x21, XZR\n"
+        "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+        "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+        "tbl z9.b, { z16.b }, z11.b\n"
+        "tbl z4.b, { z16.b }, z10.b\n"
+        "tbl z3.b, { z16.b }, z8.b\n"
+        "st1b { z9.b }, p5, [x22]\n"
+        "tbl z2.b, { z16.b }, z7.b\n"
+        "tbl z1.b, { z16.b }, z6.b\n"
+        "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+        "tbl z0.b, { z16.b }, z5.b\n"
+        "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+        "addvl x23, x23, #6\n"
+        "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+        "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+        "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+        "addvl x22, x22, #6\n"
+        "bgt 15b\n"
+        "17:" // SVE body done
+        "add x24, x24, #0x1\n"
+        "cmp x24, %x[num_strings]\n"
+        "bne 2b\n"
+        : [table] "+&r"(table)
+        : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length)
+        : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31");
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // __aarch64__
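
The SVE implementation cannot know the vector length at compile time, so it probes it with cntd and branches to a body tuned for 128-, 256-, 512-, 1024- or 2048-bit vectors. Each TBL round covers one vector length's worth of the table, so a 128-bit machine needs 16 chained rounds while a 2048-bit machine covers all 256 table bytes in a single TBL. The first round uses TBL, which zeroes out-of-range lanes; the later rounds use TBX, which leaves out-of-range lanes untouched, so the result accumulates in place with no explicit OR-merge (TBX postdates baseline SVE, and is presumably emitted as raw .inst encodings to support assemblers that lack it). Six vectors are processed per iteration, with WHILELT building the predicates for the tail. A scalar model of the TBL/TBX chaining, assuming a power-of-two vector length vl that divides 256 as the kernel does (hypothetical helper, not part of the patch):

    #include <cstddef>
    #include <cstdint>

    // Models one lookup per lane using TBL (round 0) then TBX (later rounds).
    void lut256_tbl_tbx_model(const uint8_t *table, size_t vl,
                              const uint8_t *idx, uint8_t *out, size_t n)
    {
        for(size_t lane = 0; lane < n; ++lane)
        {
            uint8_t i = idx[lane];
            // TBL semantics: in-range lanes are looked up, the rest zeroed.
            out[lane] = (i < vl) ? table[i] : 0;
            // TBX semantics: re-base by vl each round; only the round where
            // the index lands in range overwrites the destination lane.
            for(size_t round = 1; round < 256 / vl; ++round)
            {
                i = static_cast<uint8_t>(i - vl);
                if(i < vl)
                {
                    out[lane] = table[round * vl + i];
                }
            }
        }
    }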
diff --git a/src/cpu/kernels/lut/list.h b/src/cpu/kernels/lut/list.h
new file mode 100644
index 0000000..9749b91
--- /dev/null
+++ b/src/cpu/kernels/lut/list.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SRC_CORE_NEON_KERNELS_LUT_LIST_H
+#define SRC_CORE_NEON_KERNELS_LUT_LIST_H
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+#define DECLARE_LUT_KERNEL(func_name) \
+    void func_name( \
+        const uint8_t        *table, \
+        size_t                num_strings, \
+        size_t                string_length, \
+        const uint8_t *const *input, \
+        uint8_t *const       *output)
+
+DECLARE_LUT_KERNEL(lut_u8_neon);
+DECLARE_LUT_KERNEL(lut_u8_sve);
+
+#undef DECLARE_LUT_KERNEL
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // SRC_CORE_NEON_KERNELS_LUT_LIST_H
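
For context, num_strings and string_length describe the generic interface of these LUT kernels: they can translate several independent byte buffers ("strings") in one call, though the elementwise wrappers in this patch always pass a single string per window row. A minimal standalone use, assuming an AArch64 build (identity table shown for illustration; real callers pass a table such as the one built by q8_prepare_lut()):

    #include <cstddef>
    #include <cstdint>

    #include "src/cpu/kernels/lut/list.h"

    void apply_identity_lut(const uint8_t *src, uint8_t *dst, size_t len)
    {
        // Identity mapping: out[i] == in[i]; a real table encodes the operation.
        uint8_t table[256];
        for(int i = 0; i < 256; ++i)
        {
            table[i] = static_cast<uint8_t>(i);
        }

        const uint8_t *inputs[]  = { src };
        uint8_t       *outputs[] = { dst };
        arm_compute::cpu::lut_u8_neon(table, /* num_strings */ 1, len, inputs, outputs);
    }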