NormalizationLayer changes to enable fp16 in armv8a multi_isa builds

    * Moved the template arm_compute::normalize_float into the new header
      src/cpu/kernels/norm_layer/generic/neon/impl.h so that it can be
      instantiated from both src/cpu/kernels/norm_layer/generic/neon/fp32.cpp
      and src/cpu/kernels/norm_layer/generic/neon/fp16.cpp (see the
      dispatch sketch below)

    * Changes in filelist.json: added the new fp16.cpp and fp32.cpp files
      holding the float16_t and float kernels

    * Replaced the __ARM_FEATURE_FP16_VECTOR_ARITHMETIC guard in
      NENormalizationLayerKernel with ARM_COMPUTE_ENABLE_FP16 so that
      the fp16 kernels can be compiled in for multi_isa builds (see the
      sketch below)

    * Moved the fp32 kernels into the corresponding new file
      src/cpu/kernels/norm_layer/generic/neon/fp32.cpp

    * Partially resolves MLCE-1102
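
    For context, the REGISTER_FP32_NEON/REGISTER_FP16_NEON macros from
    src/core/common/Registrars.h resolve to the kernel's address only when
    the corresponding support is compiled in. A minimal sketch of the
    pattern (an illustration, not the exact definition in Registrars.h):

        // Sketch: ARM_COMPUTE_ENABLE_FP16 is set by the build system (e.g.
        // in multi_isa builds), whereas the compiler defines
        // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC only for translation units
        // built with fp16 arch flags; guarding the common kernel-selection
        // code with the former keeps it compilable in every translation unit.
        #if defined(ARM_COMPUTE_ENABLE_FP16)
        #define REGISTER_FP16_NEON(func_name) &(func_name)
        #else
        #define REGISTER_FP16_NEON(func_name) nullptr
        #endif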
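
    The dispatch also changes from a member-function pointer to a plain
    function pointer, so the state that normalize_float used to read from
    the kernel object is now passed explicitly. A short sketch of the two
    call forms, using the names from this patch:

        // Old: member-function pointer, which ties every specialisation to
        // the kernel class and hence to a single translation unit:
        //   using NormalizationFunction =
        //       void (NENormalizationLayerKernel::*)(const Window &);
        //   (this->*_func)(window);
        //
        // New: free-function pointer, so the per-ISA implementations can
        // live in separate files such as neon/fp32.cpp and neon/fp16.cpp:
        using NormalizationFunction = void (*)(const Window &window,
                                               const ITensor *in,
                                               const ITensor *in_squared,
                                               ITensor *out,
                                               NormalizationLayerInfo ninfo);
        //   (*_func)(window, _input, _input_squared, _output, _norm_info);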

Change-Id: I3f2eb2ed0b6c7f68092b17872b85082fbb5f39e2
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10739
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/Android.bp b/Android.bp
index 7681205..c4bf740 100644
--- a/Android.bp
+++ b/Android.bp
@@ -543,6 +543,8 @@
         "src/cpu/kernels/meanstddevnorm/generic/neon/fp32.cpp",
         "src/cpu/kernels/meanstddevnorm/generic/neon/impl.cpp",
         "src/cpu/kernels/meanstddevnorm/generic/neon/qasymm8.cpp",
+        "src/cpu/kernels/norm_layer/generic/neon/fp16.cpp",
+        "src/cpu/kernels/norm_layer/generic/neon/fp32.cpp",
         "src/cpu/kernels/pool2d/neon/fp16.cpp",
         "src/cpu/kernels/pool2d/neon/fp32.cpp",
         "src/cpu/kernels/pool2d/neon/nchw/all.cpp",
diff --git a/filelist.json b/filelist.json
index d8c1692..ca8b18c 100644
--- a/filelist.json
+++ b/filelist.json
@@ -1913,7 +1913,11 @@
           "common": [
             "src/core/NEON/kernels/NENormalizationLayerKernel.cpp",
             "src/runtime/NEON/functions/NENormalizationLayer.cpp"
-          ]
+          ],
+        "neon":{
+          "fp16":["src/cpu/kernels/norm_layer/generic/neon/fp16.cpp"],
+          "fp32":["src/cpu/kernels/norm_layer/generic/neon/fp32.cpp"]
+        }
         }
       },
       "Pad": {
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index a22632e..6ffc2eb 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -794,6 +794,8 @@
 	"cpu/kernels/meanstddevnorm/generic/neon/fp32.cpp",
 	"cpu/kernels/meanstddevnorm/generic/neon/impl.cpp",
 	"cpu/kernels/meanstddevnorm/generic/neon/qasymm8.cpp",
+	"cpu/kernels/norm_layer/generic/neon/fp16.cpp",
+	"cpu/kernels/norm_layer/generic/neon/fp32.cpp",
 	"cpu/kernels/pool2d/neon/fp16.cpp",
 	"cpu/kernels/pool2d/neon/fp32.cpp",
 	"cpu/kernels/pool2d/neon/nchw/all.cpp",
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 37599cd..55169b6 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -785,6 +785,8 @@
 	cpu/kernels/meanstddevnorm/generic/neon/fp32.cpp
 	cpu/kernels/meanstddevnorm/generic/neon/impl.cpp
 	cpu/kernels/meanstddevnorm/generic/neon/qasymm8.cpp
+	cpu/kernels/norm_layer/generic/neon/fp16.cpp
+	cpu/kernels/norm_layer/generic/neon/fp32.cpp
 	cpu/kernels/pool2d/neon/fp16.cpp
 	cpu/kernels/pool2d/neon/fp32.cpp
 	cpu/kernels/pool2d/neon/nchw/all.cpp
diff --git a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
index 2c61bda..8399c6c 100644
--- a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,6 +30,7 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 
+#include "src/core/common/Registrars.h"
 #include "src/core/CPP/Validate.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/NormalizationHelpers.h"
@@ -37,6 +38,8 @@
 #include "src/core/NEON/NEFixedPoint.h"
 #include "src/core/NEON/NEMath.h"
 #include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/kernels/norm_layer/generic/neon/impl.h"
+#include "src/cpu/kernels/norm_layer/generic/neon/list.h"
 
 namespace arm_compute
 {
@@ -91,7 +94,6 @@
     _input_squared = input_squared;
     _output        = output;
     _norm_info     = norm_info;
-
     switch (_input->info()->data_type())
     {
         case DataType::F32:
@@ -102,33 +104,33 @@
                 {
                     if (norm_info.type() == NormType::IN_MAP_2D)
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, true>;
+                        _func = REGISTER_FP32_NEON(cpu::neon_normalize_float32_4_0_2D);
                     }
                     else
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, false>;
+                        _func = REGISTER_FP32_NEON(cpu::neon_normalize_float32_4_0);
                     }
                     break;
                 }
                 case 1:
                     if (norm_info.type() == NormType::IN_MAP_2D)
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, true>;
+                        _func = REGISTER_FP32_NEON(cpu::neon_normalize_float32_4_1_2D);
                     }
                     else
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, false>;
+                        _func = REGISTER_FP32_NEON(cpu::neon_normalize_float32_4_1);
                     }
                     break;
                 case 2:
-                    _func = &NENormalizationLayerKernel::normalize_float<float, 4, 2, false>;
+                    _func = REGISTER_FP32_NEON(cpu::neon_normalize_float32_4_2);
                     break;
                 default:
                     break;
             }
             break;
         }
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
         case DataType::F16:
         {
             switch (norm_idx)
@@ -137,33 +139,33 @@
                 {
                     if (norm_info.type() == NormType::IN_MAP_2D)
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, true>;
+                        _func = REGISTER_FP16_NEON(cpu::neon_normalize_float16_8_0_2D);
                     }
                     else
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, false>;
+                        _func = REGISTER_FP16_NEON(cpu::neon_normalize_float16_8_0);
                     }
                     break;
                 }
                 case 1:
                     if (norm_info.type() == NormType::IN_MAP_2D)
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, true>;
+                        _func = REGISTER_FP16_NEON(cpu::neon_normalize_float16_8_1_2D);
                     }
                     else
                     {
-                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, false>;
+                        _func = REGISTER_FP16_NEON(cpu::neon_normalize_float16_8_1);
                     }
                     break;
                 case 2:
-                    _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 2, false>;
+                    _func = REGISTER_FP16_NEON(cpu::neon_normalize_float16_8_2);
                     break;
                 default:
                     break;
             }
             break;
         }
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
         default:
             ARM_COMPUTE_ERROR("NOT SUPPORTED!");
     }
@@ -173,124 +175,6 @@
     INEKernel::configure(win);
 }
 
-template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
-void NENormalizationLayerKernel::normalize_float(const Window &window)
-{
-    /** SIMD vector tag type. */
-    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
-
-    Window win(window);
-    win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
-    const auto window_start_x = static_cast<int>(window.x().start());
-    const auto window_end_x   = static_cast<int>(window.x().end());
-    const int  window_step_x  = S;
-
-    Iterator input(_input, win);
-    Iterator input_squared(_input_squared, win);
-    Iterator output(_output, win);
-
-    const int dim_y                      = _input->info()->data_layout() == DataLayout::NCHW ? 1 : 2;
-    const int radius                     = _norm_info.norm_size() / 2;
-    const int input_squared_stride_x     = _input_squared->info()->strides_in_bytes()[0];
-    const int input_squared_stride_slice = _input_squared->info()->strides_in_bytes()[dim];
-    const int input_squared_stride_row   = _input_squared->info()->strides_in_bytes()[dim_y];
-
-    const int max_right  = _input->info()->dimension(dim) - 1;
-    const int max_bottom = _input->info()->dimension(dim_y) - 1;
-
-    const auto coeff_vec = wrapper::vdup_n(static_cast<T>(_norm_info.scale_coeff()), ExactTagType{});
-    const auto beta_vec  = wrapper::vdup_n(static_cast<T>(_norm_info.beta()), ExactTagType{});
-    const auto kappa_vec = wrapper::vdup_n(static_cast<T>(_norm_info.kappa()), ExactTagType{});
-
-    auto sequential_normalization = [&](const int x, const Coordinates &id, const int current_row, const int first_row,
-                                        const int last_row, const T *input_ptr, const uint8_t *input_squared_start_ptr,
-                                        T *output_ptr)
-    {
-        const int current_slice = dim == 0 ? x : id[dim];
-        const int first_slice   = std::max(current_slice - radius, 0);
-        const int last_slice    = std::min(current_slice + radius, max_right);
-
-        const uint8_t *const input_squared_x_ptr = input_squared_start_ptr + x * input_squared_stride_x;
-        // Accumulate 2D In-Map values
-        auto accu = static_cast<T>(0.f);
-        for (int j = first_row; j <= last_row; ++j)
-        {
-            // Compute row displacement
-            const uint8_t *const input_squared_ptr = input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
-            for (int i = first_slice; i <= last_slice; ++i)
-            {
-                accu +=
-                    *reinterpret_cast<const T *>(input_squared_ptr + (i - current_slice) * input_squared_stride_slice);
-            }
-        }
-
-        // Normalize
-        const auto normalized = std::pow(
-            accu * static_cast<T>(_norm_info.scale_coeff()) + static_cast<T>(_norm_info.kappa()), _norm_info.beta());
-        const auto normalized_pixel = (*(input_ptr + x)) / normalized;
-        *(output_ptr + x)           = normalized_pixel;
-    };
-
-    execute_window_loop(
-        win,
-        [&](const Coordinates &id)
-        {
-            const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
-            auto       output_ptr = reinterpret_cast<T *>(output.ptr());
-
-            // Get range to normalize
-            const int current_row = do_2D_norm ? id[dim_y] : 0;
-            const int first_row   = do_2D_norm ? std::max(current_row - radius, 0) : 0;
-            const int last_row    = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;
-
-            int x = window_start_x;
-            // Compute serially starting elements for the case x dimension is width
-            for (; x < radius && x < window_end_x && dim == 0; ++x)
-            {
-                sequential_normalization(x, id, current_row, first_row, last_row, input_ptr, input_squared.ptr(),
-                                         output_ptr);
-            }
-
-            // Compute vectorized
-            for (; x <= window_end_x - window_step_x - radius; x += window_step_x)
-            {
-                const int current_slice = dim == 0 ? x : id[dim];
-                const int first_slice   = std::max(current_slice - radius, 0);
-                const int last_slice    = std::min(current_slice + radius, max_right);
-
-                const uint8_t *const input_squared_x_ptr = input_squared.ptr() + x * input_squared_stride_x;
-                // Accumulate 2D In-Map values
-                auto accu = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
-                for (int j = first_row; j <= last_row; ++j)
-                {
-                    // Compute row displacement
-                    const uint8_t *const input_squared_ptr =
-                        input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
-                    for (int i = first_slice; i <= last_slice; ++i)
-                    {
-                        accu = wrapper::vadd(
-                            accu, wrapper::vloadq(reinterpret_cast<const T *>(
-                                      input_squared_ptr + (i - current_slice) * input_squared_stride_slice)));
-                    }
-                }
-
-                // Normalize
-                const auto normalized       = wrapper::vpow(wrapper::vmla(kappa_vec, coeff_vec, accu), beta_vec);
-                const auto normalized_pixel = wrapper::vmul(wrapper::vloadq(input_ptr + x), wrapper::vinv(normalized));
-                wrapper::vstore(reinterpret_cast<T *>(output_ptr + x), normalized_pixel);
-            }
-
-            // Compute left-over elements
-            for (; x < window_end_x; ++x)
-            {
-                sequential_normalization(x, id, current_row, first_row, last_row, input_ptr, input_squared.ptr(),
-                                         output_ptr);
-            }
-        },
-        input, input_squared, output);
-}
-
 Status NENormalizationLayerKernel::validate(const ITensorInfo           *input,
                                             const ITensorInfo           *input_squared,
                                             const ITensorInfo           *output,
@@ -309,6 +193,6 @@
     ARM_COMPUTE_ERROR_ON(_func == nullptr);
 
     // Run function
-    (this->*_func)(window);
+    (*_func)(window, _input, _input_squared, _output, _norm_info);
 }
 } // namespace arm_compute
diff --git a/src/core/NEON/kernels/NENormalizationLayerKernel.h b/src/core/NEON/kernels/NENormalizationLayerKernel.h
index 2d8d9f3..5ba4c3e 100644
--- a/src/core/NEON/kernels/NENormalizationLayerKernel.h
+++ b/src/core/NEON/kernels/NENormalizationLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_NENORMALIZATIONLAYERKERNEL_H
-#define ARM_COMPUTE_NENORMALIZATIONLAYERKERNEL_H
+#ifndef ACL_SRC_CORE_NEON_KERNELS_NENORMALIZATIONLAYERKERNEL_H
+#define ACL_SRC_CORE_NEON_KERNELS_NENORMALIZATIONLAYERKERNEL_H
 
 #include "src/core/NEON/INEKernel.h"
 
@@ -82,24 +82,12 @@
     void run(const Window &window, const ThreadInfo &info) override;
 
 private:
-    /** Function to perform normalization depending on the given template
-     *  dimension. The second template parameter specifies whether the
-     *  normalization has to be 1D or 2D.
-     *
-     * @note Only supported normalizations are:
-     *  - 1D over X or Z
-     *  - 2D over X and Y
-     *
-     * @param[in] window Region on which to execute the kernel.
-     */
-    template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
-    void normalize_float(const Window &window);
-
     /** Common signature for all the specialised normalization functions
      *
      * @param[in] window Region on which to execute the kernel.
      */
-    using NormalizationFunction = void (NENormalizationLayerKernel::*)(const Window &window);
+    using NormalizationFunction = void (*)(
+        const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo);
 
 private:
     NormalizationFunction  _func;
@@ -109,4 +97,4 @@
     NormalizationLayerInfo _norm_info;
 };
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_NENORMALIZATIONLAYERKERNEL_H */
+#endif // ACL_SRC_CORE_NEON_KERNELS_NENORMALIZATIONLAYERKERNEL_H
diff --git a/src/cpu/kernels/norm_layer/generic/neon/fp16.cpp b/src/cpu/kernels/norm_layer/generic/neon/fp16.cpp
new file mode 100644
index 0000000..f85fe7a
--- /dev/null
+++ b/src/cpu/kernels/norm_layer/generic/neon/fp16.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/CpuTypes.h"
+#include "src/cpu/kernels/norm_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+void neon_normalize_float16_8_0_2D(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float16_t, 8, 0, true>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float16_8_0(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float16_t, 8, 0, false>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float16_8_1_2D(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float16_t, 8, 1, true>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float16_8_1(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float16_t, 8, 1, false>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float16_8_2(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float16_t, 8, 2, false>(window, in, in_squared, out, ninfo);
+}
+
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/norm_layer/generic/neon/fp32.cpp b/src/cpu/kernels/norm_layer/generic/neon/fp32.cpp
new file mode 100644
index 0000000..0b64f46
--- /dev/null
+++ b/src/cpu/kernels/norm_layer/generic/neon/fp32.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/CpuTypes.h"
+#include "src/cpu/kernels/norm_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void neon_normalize_float32_4_0_2D(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float, 4, 0, true>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float32_4_0(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float, 4, 0, false>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float32_4_1_2D(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float, 4, 1, true>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float32_4_1(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float, 4, 1, false>(window, in, in_squared, out, ninfo);
+}
+
+void neon_normalize_float32_4_2(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    arm_compute::normalize_float<float, 4, 2, false>(window, in, in_squared, out, ninfo);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/norm_layer/generic/neon/impl.h b/src/cpu/kernels/norm_layer/generic/neon/impl.h
new file mode 100644
index 0000000..6103165
--- /dev/null
+++ b/src/cpu/kernels/norm_layer/generic/neon/impl.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_NORM_LAYER_GENERIC_NEON_IMPL_H
+#define ACL_SRC_CPU_KERNELS_NORM_LAYER_GENERIC_NEON_IMPL_H
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include "src/core/helpers/NormalizationHelpers.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/core/NEON/NEMath.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+
+namespace arm_compute
+{
+/** Function to perform normalization depending on the given template
+ *  dimension. The second template parameter specifies whether the
+ *  normalization has to be 1D or 2D.
+ *
+ * @note Only supported normalizations are:
+ *  - 1D over X or Z
+ *  - 2D over X and Y
+ *
+ * @param[in] window     Region on which to execute the kernel.
+ * @param[in] in         Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
+ *                       and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32. Data layouts supported: NCHW/NHWC.
+ * @param[in] in_squared Source with each element has been squared. 3 lower dims represent a single input with dimensions [width, height, IFM],
+ *                       Data type and layout supported: same as @p input.
+ * @param[in] out        Destination tensor. Output will have the same number of dimensions as input. Data type and layout supported: same as @p input.
+ * @param[in] ninfo      Normalization layer information like the normalization type, normalization size and other parameters.
+ */
+template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
+void normalize_float(
+    const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, NormalizationLayerInfo ninfo)
+{
+    /** SIMD vector tag type. */
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    Window win(window);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+    const int  window_step_x  = S;
+
+    Iterator input(in, win);
+    Iterator input_squared(in_squared, win);
+    Iterator output(out, win);
+
+    const int dim_y                      = in->info()->data_layout() == DataLayout::NCHW ? 1 : 2;
+    const int radius                     = ninfo.norm_size() / 2;
+    const int input_squared_stride_x     = in_squared->info()->strides_in_bytes()[0];
+    const int input_squared_stride_slice = in_squared->info()->strides_in_bytes()[dim];
+    const int input_squared_stride_row   = in_squared->info()->strides_in_bytes()[dim_y];
+
+    const int max_right  = in->info()->dimension(dim) - 1;
+    const int max_bottom = in->info()->dimension(dim_y) - 1;
+
+    const auto coeff_vec = wrapper::vdup_n(static_cast<T>(ninfo.scale_coeff()), ExactTagType{});
+    const auto beta_vec  = wrapper::vdup_n(static_cast<T>(ninfo.beta()), ExactTagType{});
+    const auto kappa_vec = wrapper::vdup_n(static_cast<T>(ninfo.kappa()), ExactTagType{});
+
+    auto sequential_normalization = [&](const int x, const Coordinates &id, const int current_row, const int first_row,
+                                        const int last_row, const T *input_ptr, const uint8_t *input_squared_start_ptr,
+                                        T *output_ptr)
+    {
+        const int current_slice = dim == 0 ? x : id[dim];
+        const int first_slice   = std::max(current_slice - radius, 0);
+        const int last_slice    = std::min(current_slice + radius, max_right);
+
+        const uint8_t *const input_squared_x_ptr = input_squared_start_ptr + x * input_squared_stride_x;
+        // Accumulate 2D In-Map values
+        auto accu = static_cast<T>(0.f);
+        for (int j = first_row; j <= last_row; ++j)
+        {
+            // Compute row displacement
+            const uint8_t *const input_squared_ptr = input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
+            for (int i = first_slice; i <= last_slice; ++i)
+            {
+                accu +=
+                    *reinterpret_cast<const T *>(input_squared_ptr + (i - current_slice) * input_squared_stride_slice);
+            }
+        }
+
+        // Normalize
+        const auto normalized =
+            std::pow(accu * static_cast<T>(ninfo.scale_coeff()) + static_cast<T>(ninfo.kappa()), ninfo.beta());
+        const auto normalized_pixel = (*(input_ptr + x)) / normalized;
+        *(output_ptr + x)           = normalized_pixel;
+    };
+
+    execute_window_loop(
+        win,
+        [&](const Coordinates &id)
+        {
+            const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
+            auto       output_ptr = reinterpret_cast<T *>(output.ptr());
+
+            // Get range to normalize
+            const int current_row = do_2D_norm ? id[dim_y] : 0;
+            const int first_row   = do_2D_norm ? std::max(current_row - radius, 0) : 0;
+            const int last_row    = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;
+
+            int x = window_start_x;
+            // Compute serially starting elements for the case x dimension is width
+            for (; x < radius && x < window_end_x && dim == 0; ++x)
+            {
+                sequential_normalization(x, id, current_row, first_row, last_row, input_ptr, input_squared.ptr(),
+                                         output_ptr);
+            }
+
+            // Compute vectorized
+            for (; x <= window_end_x - window_step_x - radius; x += window_step_x)
+            {
+                const int current_slice = dim == 0 ? x : id[dim];
+                const int first_slice   = std::max(current_slice - radius, 0);
+                const int last_slice    = std::min(current_slice + radius, max_right);
+
+                const uint8_t *const input_squared_x_ptr = input_squared.ptr() + x * input_squared_stride_x;
+                // Accumulate 2D In-Map values
+                auto accu = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+                for (int j = first_row; j <= last_row; ++j)
+                {
+                    // Compute row displacement
+                    const uint8_t *const input_squared_ptr =
+                        input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
+                    for (int i = first_slice; i <= last_slice; ++i)
+                    {
+                        accu = wrapper::vadd(
+                            accu, wrapper::vloadq(reinterpret_cast<const T *>(
+                                      input_squared_ptr + (i - current_slice) * input_squared_stride_slice)));
+                    }
+                }
+
+                // Normalize
+                const auto normalized       = wrapper::vpow(wrapper::vmla(kappa_vec, coeff_vec, accu), beta_vec);
+                const auto normalized_pixel = wrapper::vmul(wrapper::vloadq(input_ptr + x), wrapper::vinv(normalized));
+                wrapper::vstore(reinterpret_cast<T *>(output_ptr + x), normalized_pixel);
+            }
+
+            // Compute left-over elements
+            for (; x < window_end_x; ++x)
+            {
+                sequential_normalization(x, id, current_row, first_row, last_row, input_ptr, input_squared.ptr(),
+                                         output_ptr);
+            }
+        },
+        input, input_squared, output);
+}
+
+} // namespace arm_compute
+#endif // ACL_SRC_CPU_KERNELS_NORM_LAYER_GENERIC_NEON_IMPL_H
diff --git a/src/cpu/kernels/norm_layer/generic/neon/list.h b/src/cpu/kernels/norm_layer/generic/neon/list.h
new file mode 100644
index 0000000..f2e83d7
--- /dev/null
+++ b/src/cpu/kernels/norm_layer/generic/neon/list.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_NORM_LAYER_GENERIC_NEON_LIST_H
+#define ACL_SRC_CPU_KERNELS_NORM_LAYER_GENERIC_NEON_LIST_H
+namespace arm_compute
+{
+namespace cpu
+{
+
+#define DECLARE_NORMALIZATION_KERNEL(func_name)                                                      \
+    void func_name(const Window &window, const ITensor *in, const ITensor *in_squared, ITensor *out, \
+                   NormalizationLayerInfo ninfo)
+
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float32_4_0_2D);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float32_4_0);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float32_4_1_2D);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float32_4_1);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float32_4_2);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float16_8_0_2D);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float16_8_0);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float16_8_1_2D);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float16_8_1);
+DECLARE_NORMALIZATION_KERNEL(neon_normalize_float16_8_2);
+
+#undef DECLARE_NORMALIZATION_KERNEL
+} // namespace cpu
+} // namespace arm_compute
+#endif // ACL_SRC_CPU_KERNELS_NORM_LAYER_GENERIC_NEON_LIST_H