Disable SME2 Gemm kernel selection when results need to be accumulated

SME2 kernels use a different accumulation buffer, and the destination tensor is not copied into this buffer as the initial value, which causes mismatches when accumulation is requested. This patch modifies the kernel selection algorithm so that SME2 kernels are not selected if accumulation is required.
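
For illustration only, a minimal standalone sketch of the selection pattern used below: each candidate kernel carries an "is supported" predicate, and the SME2 entries gain a !accumulate guard. The types here (Args, Candidate) are simplified stand-ins, not the actual arm_gemm GemmArgs/GemmImplementation structures; only the field names mirror members visible in the diff.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Simplified stand-in for arm_gemm's GemmArgs; only the fields used here.
    struct Args {
        bool has_sme2   = false; // CPU supports SME2
        bool fast_mode  = false; // bf16 fast-math allowed
        bool accumulate = false; // C += A*B requested instead of C = A*B
    };

    // A candidate kernel: name plus an "is supported" predicate.
    struct Candidate {
        std::string name;
        std::function<bool(const Args &)> is_supported;
    };

    int main() {
        // SME2 kernels are excluded when accumulation is requested,
        // mirroring the !args._accumulate guards added in the patch.
        std::vector<Candidate> candidates = {
            { "sme2_interleaved_nomerge_fp32_mopa_2VLx2VL",
              [](const Args &a) { return a.has_sme2 && !a.accumulate; } },
            { "generic_fallback_fp32",
              [](const Args &) { return true; } },
        };

        Args args;
        args.has_sme2   = true;
        args.accumulate = true; // destination must be added to, so SME2 is skipped

        // First candidate whose predicate passes wins.
        for (const auto &c : candidates) {
            if (c.is_supported(args)) {
                std::cout << "selected: " << c.name << "\n";
                break;
            }
        }
        return 0;
    }

With accumulate set, the sketch falls through to the generic kernel; the real selection tables in gemm_fp32.cpp behave analogously after this change.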

Resolves: COMPMID-6995

Change-Id: I82da3cba41729f938a046f26b41b63ff5716c02d
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11500
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 290fe87..af0d38e 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -124,14 +124,14 @@
 {
     GemmMethod::GEMM_HYBRID,
     "sme2_gemv_fp32bf16fp32_dot_16VL",
-    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input && !args._accumulate; },
     nullptr,
     [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32bf16fp32_dot_16VL, float, float>(args); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sme2_gemv_fp32_mla_16VL",
-    [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+    [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input && !args._accumulate; },
     nullptr,
     [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32_mla_16VL, float, float>(args); }
 },
@@ -139,7 +139,7 @@
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL",
-    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
     [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
                                return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
     [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL, float, float>(args); }
@@ -148,7 +148,7 @@
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sme2_interleaved_nomerge_fp32_mopa_1VLx4VL",
-    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
     [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
                                return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
     [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL, float, float>(args); }
@@ -157,7 +157,7 @@
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL",
-    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
     [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
                                return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
     [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL, float, float>(args); }
@@ -166,7 +166,7 @@
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sme2_interleaved_nomerge_fp32_mopa_4VLx1VL",
-    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
     [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
                                return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
     [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL, float, float>(args); }
@@ -175,7 +175,7 @@
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL",
-    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+    [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
     nullptr,
     [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL, float, float>(args); }
 },
@@ -183,7 +183,7 @@
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sme2_interleaved_nomerge_fp32_mopa_2VLx2VL",
-    [](const GemmArgs &args) { return args._ci->has_sme2(); },
+    [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
     nullptr,
     [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL, float, float>(args); }
 },