Meanstddevnorm changes to enable fp16 in armv8-a multi_isa builds

    * Code guarded by __ARM_FEATURE_FP16_VECTOR_ARITHMETIC needs to be
      moved into an fp16.cpp file so that it can be compiled with
      -march=armv8.2-a+fp16 (see the sketch after this list)

    * Partially resolves MLCE-1102
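
    A minimal sketch of the split this enables, for reference. It is
    illustrative only: the file layout and fp16_kernel_sketch() are
    hypothetical and not Compute Library API; only the
    __ARM_FEATURE_FP16_VECTOR_ARITHMETIC guard and the
    -march=armv8.2-a+fp16 flag are taken from this change, and the
    per-file flags are assumed to come from the multi_isa build scripts.

        // ---- fp16_sketch.cpp: the only translation unit assumed to be
        // ---- built with -march=armv8.2-a+fp16 in a multi_isa build
        #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
        #include <arm_neon.h>
        // The guard is true only here, because only this file gets the
        // +fp16 flag; every other file keeps the baseline architecture.
        void fp16_kernel_sketch(const float16_t *in, float16_t *out, int n)
        {
            const float16x8_t one = vdupq_n_f16(static_cast<float16_t>(1.0f));
            int               x   = 0;
            for(; x <= n - 8; x += 8)
            {
                // fp16 vector arithmetic is legal in this translation unit
                vst1q_f16(out + x, vaddq_f16(vld1q_f16(in + x), one));
            }
            for(; x < n; ++x)
            {
                out[x] = static_cast<float16_t>(static_cast<float>(in[x]) + 1.0f);
            }
        }
        #endif

        // ---- A caller built with the baseline flags only needs a plain
        // ---- declaration, so no fp16 intrinsics leak into translation
        // ---- units compiled without +fp16:
        // void fp16_kernel_sketch(const float16_t *in, float16_t *out, int n);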

Change-Id: I7e6d998e427982d4a037dbce6d17ca378665e07f
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10241
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/cpu/kernels/meanstddevnorm/generic/neon/fp16.cpp b/src/cpu/kernels/meanstddevnorm/generic/neon/fp16.cpp
index 47bf64a..96e4030 100644
--- a/src/cpu/kernels/meanstddevnorm/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/meanstddevnorm/generic/neon/fp16.cpp
@@ -22,13 +22,93 @@
  * SOFTWARE.
  */
 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
-#include "src/cpu/CpuTypes.h"
+
 #include "src/cpu/kernels/meanstddevnorm/generic/neon/impl.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/CpuTypes.h"
 
 namespace arm_compute
 {
 namespace cpu
 {
+template <>
+void mean_stddev_normalization<float16_t, 8>(ITensor *input, ITensor *output, float epsilon, const Window &window)
+{
+    // Collapse the window on the X dimension: the loops below walk each row manually
+    Window win = window;
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    const int  window_step_x  = 8;
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+
+    Iterator input_itr(input, win);
+    Iterator output_itr(output, win);
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        int  x       = window_start_x;
+        auto in_ptr  = reinterpret_cast<const float16_t *>(input_itr.ptr());
+        auto out_ptr = reinterpret_cast<float16_t *>(output_itr.ptr());
+
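+        // Vector accumulators: the sum is kept in fp16, the sum of squares in fp32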
+        float16x8_t sum_vec    = vdupq_n_f16(static_cast<float16_t>(0.0f));
+        float32x4_t sum_sq_vec = vdupq_n_f32(0.0f);
+
+        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        {
+            float16x8_t data = vld1q_f16(in_ptr + x);
+            sum_vec          = vaddq_f16(sum_vec, data);
+            float32x4_t dl   = vcvt_f32_f16(vget_low_f16(data));
+            float32x4_t dh   = vcvt_f32_f16(vget_high_f16(data));
+            sum_sq_vec       = vaddq_f32(sum_sq_vec, vmulq_f32(dl, dl));
+            sum_sq_vec       = vaddq_f32(sum_sq_vec, vmulq_f32(dh, dh));
+        }
+
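+        // Pairwise-reduce the vector accumulators to a scalar sum and sum of squares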
+        float16x4_t sum_carry_res = vpadd_f16(vget_high_f16(sum_vec), vget_low_f16(sum_vec));
+        sum_carry_res             = vpadd_f16(sum_carry_res, sum_carry_res);
+        sum_carry_res             = vpadd_f16(sum_carry_res, sum_carry_res);
+
+        float32x4_t sum_sq_carry_res = vpaddq_f32(sum_sq_vec, sum_sq_vec);
+        sum_sq_carry_res             = vpaddq_f32(sum_sq_carry_res, sum_sq_carry_res);
+
+        float16_t sum    = vget_lane_f16(sum_carry_res, 0);
+        float     sum_sq = vgetq_lane_f32(sum_sq_carry_res, 0);
+
+        // Compute left-over elements
+        for(; x < window_end_x; ++x)
+        {
+            float16_t data = *(in_ptr + x);
+            sum += data;
+            float fdata = static_cast<float>(data);
+            sum_sq += fdata * fdata;
+        }
+
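+        // Compute the mean, variance and reciprocal standard deviation of the row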
+        float16_t mean       = sum / input->info()->dimension(0);
+        float     var        = (sum_sq / input->info()->dimension(0)) - (mean * mean);
+        float16_t stddev_inv = static_cast<float16_t>(1.f / sqrt(var + epsilon));
+
+        float16x8_t mean_vec       = vdupq_n_f16(mean);
+        float16x8_t stddev_inv_vec = vdupq_n_f16(stddev_inv);
+
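+        // Normalize the row: (x - mean) * stddev_inv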
+        for(x = window_start_x; x <= (window_end_x - window_step_x); x += window_step_x)
+        {
+            float16x8_t data = vld1q_f16(in_ptr + x);
+            float16x8_t res  = vmulq_f16(vsubq_f16(data, mean_vec), stddev_inv_vec);
+            // Store results
+            vst1q_f16(out_ptr + x, res);
+        }
+        for(; x < window_end_x; ++x)
+        {
+            *(out_ptr + x) = (*(in_ptr + x) - mean) * stddev_inv;
+        }
+    },
+    input_itr, output_itr);
+}
+
 void neon_fp16_meanstddevnorm(ITensor *input, ITensor *output, float epsilon, const Window &window)
 {
     return mean_stddev_normalization<float16_t, 8>(input, output, epsilon, window);