Refactor arm_gemm to enable FP16 in all multi_isa builds

* Replace the __ARM_FEATURE_FP16_VECTOR_ARITHMETIC compiler checks in
  arm_gemm (interleaves, the a64_hgemm_8x24 kernel, the FP16 merge and
  the transforms) with the ARM_COMPUTE_ENABLE_FP16 build option, so the
  FP16 code paths are compiled in every multi_isa build (see the sketch
  below).
* Resolves MLCE-1285
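
For reference, a minimal sketch of the guard pattern this change applies
across arm_gemm; it is illustrative only, the actual declarations and
kernels are in the diff below:

    /* Before: FP16 code was only built when the compiler baseline
     * itself defined __ARM_FEATURE_FP16_VECTOR_ARITHMETIC. */
    #if defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    // FP16 kernels, interleaves, merges and transforms
    #endif

    /* After: FP16 code is built whenever the library enables FP16
     * support, which includes multi_isa builds. */
    #if defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16)
    // FP16 kernels, interleaves, merges and transforms
    #endif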

Change-Id: I22a37972aefe1c0f04accbc798baa18358ed8959
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11552
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
index 5959193..7c09608 100644
--- a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
+++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022 Arm Limited.
+ * Copyright (c) 2020-2022, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -330,11 +330,11 @@
 #endif // ARM_COMPUTE_ENABLE_SVE && ARM_COMPUTE_ENABLE_SVEF32MM
 
 /* FP16 */
-#if defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16)
 template void IndirectInterleave<8, 1, VLType::None>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
 template void ConvolutionInterleave<8, 1, VLType::None>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
 template void Interleave<8, 1, VLType::None>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
-#endif // FP16_KERNELS ar __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // FP16_KERNELS or ARM_COMPUTE_ENABLE_FP16
 
 template void IndirectInterleave<8, 1, VLType::None>(float *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
 template void ConvolutionInterleave<8, 1, VLType::None>(float *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp
index 586d6a6..d9668aa 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,7 +23,7 @@
  */
 #pragma once
 
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16))
 
 #include "../performance_parameters.hpp"
 #include "../std_transforms_fixed.hpp"
@@ -89,4 +89,4 @@
 
 } // namespace arm_gemm
 
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (FP16_KERNELS || ARM_COMPUTE_ENABLE_FP16)
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
index a81d450..ba47e0a 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,7 +23,7 @@
  */
 #pragma once
 
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16))
 
 template<>
 void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const __fp16 *bias, Activation act, bool append)
@@ -86,7 +86,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -140,7 +140,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -217,7 +217,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -317,7 +317,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -439,7 +439,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -584,7 +584,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -752,7 +752,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -944,7 +944,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1150,7 +1150,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1204,7 +1204,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1278,7 +1278,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1372,7 +1372,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1485,7 +1485,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1618,7 +1618,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1771,7 +1771,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -1945,7 +1945,7 @@
                         } else {
                             /* Optimized routine to copy an entire block */
                             __asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
                                 ".arch  armv8.2-a+fp16\n"
 #endif
                                 "dup v0.8h, %[maxval].h[0]\n"
@@ -2112,4 +2112,4 @@
     }
 }
 
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (FP16_KERNELS || ARM_COMPUTE_ENABLE_FP16)
diff --git a/src/core/NEON/kernels/arm_gemm/transform.cpp b/src/core/NEON/kernels/arm_gemm/transform.cpp
index 45e4f0e..06d9e24 100644
--- a/src/core/NEON/kernels/arm_gemm/transform.cpp
+++ b/src/core/NEON/kernels/arm_gemm/transform.cpp
@@ -129,17 +129,17 @@
 // We don't have assembler transforms for AArch32, generate templated ones here.
 #ifdef __arm__
 template void Transform<8, 1, true, VLType::None>(float *, const float *, int, int, int, int, int);
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(ARM_COMPUTE_ENABLE_FP16)
 template void Transform<8, 1, true, VLType::None>(float *, const __fp16 *, int, int, int, int, int);
-#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // defined(ARM_COMPUTE_ENABLE_FP16)
 #ifdef ARM_COMPUTE_ENABLE_BF16
 template void Transform<8, 1, true, VLType::None>(float *, const bfloat16 *, int, int, int, int, int);
 #endif // ARM_COMPUTE_ENABLE_BF16
 #endif // AArch32
 
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(ARM_COMPUTE_ENABLE_FP16)
 template void Transform<12, 1, false, VLType::None>(float *, const __fp16 *, int, int, int, int, int);
-#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // defined(ARM_COMPUTE_ENABLE_FP16)
 #ifdef ARM_COMPUTE_ENABLE_BF16
 template void Transform<12, 1, false, VLType::None>(float *, const bfloat16 *, int, int, int, int, int);
 #endif // ARM_COMPUTE_ENABLE_BF16