COMPMID-3757: (u)int8: Don't select the 16-bit route on A53 for cases with very few rows

Also added a 2D version of the 16-bit route, and altered the selection
heuristic so that 2D mode will be used in cases where 1D mode won't
thread well.

Change-Id: I0057fde08456771dc0090ac51f50d82f8bb86044
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3903
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index bddcc8d..147caee 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -122,17 +122,25 @@
     [](const GemmArgs &args) { return new GemmInterleaved<gemm_s8_12x8, int8_t, int32_t>(args); }
 },
 {
-    GemmMethod::GEMM_INTERLEAVED,
-    "gemm_s16_12x8",
+    GemmMethod::GEMM_INTERLEAVED_2D,
+    "gemm_s16_12x8_2d",
     nullptr,
-    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53; },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4 && (args._Msize / args._maxthreads) < 8; },
+    [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<gemm_s16_12x8, int8_t, int32_t>(args); },
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "gemm_s16_12x8_1d",
+    nullptr,
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4; },
     [](const GemmArgs &args) { return new GemmInterleaved<gemm_s16_12x8, int8_t, int32_t>(args); },
 },
 {
     GemmMethod::GEMM_INTERLEAVED_2D,
     "gemm_s8_4x4_2d",
     nullptr,
-    [](const GemmArgs &args) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8); },
+    [](const GemmArgs &args) { return ((args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8)) ||
+                                       ((args._Msize / args._maxthreads) < 4); },
     [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<gemm_s8_4x4, int8_t, int32_t>(args); }
 },
 {
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index 88726b1..06e68cb 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -101,13 +101,6 @@
     [](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_u8u32_dot_4x6, uint8_t, uint32_t>(args); }
 },
 {
-    GemmMethod::GEMM_INTERLEAVED,
-    "gemm_u16_12x8",
-    nullptr,
-    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53; },
-    [](const GemmArgs &args) { return new GemmInterleaved<gemm_u16_12x8, uint8_t, uint32_t>(args); },
-},
-{
     GemmMethod::GEMM_HYBRID,
     "hybrid_u8u32_dot_16x4",
     [](const GemmArgs &args) { return args._ci->has_dotprod() && args._Ksize>=16; },
@@ -130,9 +123,24 @@
 },
 {
     GemmMethod::GEMM_INTERLEAVED_2D,
+    "gemm_u16_12x8_2d",
+    nullptr,
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4 && (args._Msize / args._maxthreads) < 8; },
+    [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<gemm_u16_12x8, uint8_t, uint32_t>(args); },
+},
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "gemm_u16_12x8_1d",
+    nullptr,
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4; },
+    [](const GemmArgs &args) { return new GemmInterleaved<gemm_u16_12x8, uint8_t, uint32_t>(args); },
+},
+{
+    GemmMethod::GEMM_INTERLEAVED_2D,
     "gemm_u8_4x4_2d",
     nullptr,
-    [](const GemmArgs &args) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8); },
+    [](const GemmArgs &args) { return ((args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8)) ||
+                                       ((args._Msize / args._maxthreads) < 4); },
     [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<gemm_u8_4x4, uint8_t, uint32_t>(args); }
 },
 {
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index 30232b4..ad349cb 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -415,34 +415,6 @@
         in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
     }
 
-    // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
-    if(_workspace.buffer() != nullptr)
-    {
-        _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(_workspace.buffer()));
-        const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
-        unsigned int       num_threads = NEScheduler::get().num_threads();
-        if(window_size < num_threads)
-        {
-            num_threads = window_size;
-            _gemm_kernel_asm->set_nthreads(num_threads);
-        }
-    }
-
-    // Prepare assembly kernel
-    prepare();
-
-    TypeOutput *bias = nullptr;
-    // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C.
-    if(_c && _c->info()->data_type() != DataType::S32)
-    {
-        bias = reinterpret_cast<TypeOutput *>(_c->buffer() + _c->info()->offset_first_element_in_bytes());
-    }
-    // Set gemm parameters
-    _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a,
-                                 in1_ptr, ldb, multi_stride_b,
-                                 out_ptr, ldd, batch_stride_d, multi_stride_d,
-                                 bias, 0);
-    // Schedule assembly kernel
     IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX);
     if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && _d->info()->data_type() == DataType::F32)
     {
@@ -463,6 +435,41 @@
         scheduling_hint             = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
     }
 
+    // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
+    if(_workspace.buffer() != nullptr)
+    {
+        _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(_workspace.buffer()));
+        const unsigned int split_dim   = scheduling_hint.split_dimension();
+        const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
+        unsigned int       num_threads = NEScheduler::get().num_threads();
+        if(window_size < num_threads)
+        {
+            num_threads = window_size;
+        }
+        if(split_dim != IScheduler::split_dimensions_all)
+        {
+            // Make sure the kernel does not expect more threads than we can actually spawn
+            const unsigned int num_iterations = _optimised_kernel.get()->window().num_iterations(split_dim);
+            num_threads                       = std::min(num_iterations, num_threads);
+        }
+        _gemm_kernel_asm->set_nthreads(num_threads);
+    }
+
+    // Prepare assembly kernel
+    prepare();
+
+    TypeOutput *bias = nullptr;
+    // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C.
+    if(_c && _c->info()->data_type() != DataType::S32)
+    {
+        bias = reinterpret_cast<TypeOutput *>(_c->buffer() + _c->info()->offset_first_element_in_bytes());
+    }
+    // Set gemm parameters
+    _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a,
+                                 in1_ptr, ldb, multi_stride_b,
+                                 out_ptr, ldd, batch_stride_d, multi_stride_d,
+                                 bias, 0);
+    // Schedule assembly kernel
     NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
 }