Accumulation in Cpu Gemm kernels is not supported for quantized kernels in aarch32. This patch guards the relevant tests.

Partially Resolves: ONCPUML-1442

Signed-off-by: Radu Salavat <radu.salavat@arm.com>
Change-Id: I8eed80db4b522185c3c50c13f0f701aa48961057
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11410
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 1b07975..8497dd1 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -317,6 +317,8 @@
 }
 TEST_SUITE_END() // FusedOffsetOutput
 
+// accumulation is not supported for Int8/UInt8 in aarch32
+#ifdef __aarch64__
 TEST_SUITE(ACCUMULATION)
 TEST_SUITE(S32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreAccumulateFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
@@ -331,6 +333,7 @@
 }
 TEST_SUITE_END() // S32
 TEST_SUITE_END() // ACCUMULATION
+#endif // __aarch64__
 
 TEST_SUITE_END() // MatrixMultiplyCore
 TEST_SUITE_END() // GEMMLowp