Revert "Update the heuristic for CLDepthwiseConvolutionNative kernel"

Resolves COMPMID-5813
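
This reverts the FP and quantized NHWC native depthwise kernels to the
T_LOAD_NHWC_WITH_DILATION load path (guarded by _IBOUNDARY_CHECK) in place
of the indirect-index T_LOAD2D_INDIRECT path, and restores the previous m0
selection in ClDWCNativeDefaultConfigValhall: with stride_x == 1 and
dilation_x == 1, kernel widths between 2 and 8 use m0 = 2 again instead of
the per-width values (4 for kernel_w == 3, 3 for kernel_w == 5, 2 otherwise)
removed below.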

Change-Id: I5ef6fe9fb6a54db18e41a71085896fd08bc08dbb
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8975
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl b/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl
index 6d64e27..dcbae22 100644
--- a/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl
+++ b/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl
@@ -108,6 +108,7 @@
 #define _IN0_A N0_A        // Cols tile A. It can be either 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
 #define _IM0_B _IWEI_WIDTH // Rows tile B
 #define _IN0_B N0          // Cols tile B
+#define _IBOUNDARY_CHECK (!((WEI_WIDTH == 1 && WEI_HEIGHT == 1 && PAD_LEFT == 0 && PAD_TOP == 0 && M0 == 1)))
 
     const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
     const int xo   = GET_SPATIAL_IDX(1, M0, 0);          // WIDTH
@@ -145,22 +146,8 @@
             a[i].v = 0;
         })
 
-        TILE(int, 1, _IM0_A, mi);
-
-        LOOP_UNROLLING(int, xk_i, 0, 1, _IM0_A,
-        {
-            int x_s    = xi + xk_i * (DILATION_X);
-            int y_s    = yi + yk   * (DILATION_Y);
-            mi[0].s[xk_i] = x_s + y_s * SRC_WIDTH;
-            mi[0].s[xk_i] = mi[0].s[xk_i] + bout * (int)(SRC_WIDTH * SRC_HEIGHT);
-            mi[0].s[xk_i] = select(-1, mi[0].s[xk_i], x_s >= 0);
-            mi[0].s[xk_i] = select(-1, mi[0].s[xk_i], x_s < SRC_WIDTH);
-            mi[0].s[xk_i] = select(-1, mi[0].s[xk_i], y_s >= 0);
-            mi[0].s[xk_i] = select(-1, mi[0].s[xk_i], y_s < SRC_HEIGHT);
-        })
-
         // Load tile from the src tensor (TILE A)
-        T_LOAD2D_INDIRECT(SRC_DATA_TYPE, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, (cout / DEPTH_MULTIPLIER), src_stride_y, mi, a);
+        T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, (cout / DEPTH_MULTIPLIER), SRC_WIDTH, SRC_HEIGHT, DILATION_X, 1, _IBOUNDARY_CHECK, a);
 
         TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);
 
diff --git a/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl b/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl
index e502d72..2d255e5 100644
--- a/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl
+++ b/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl
@@ -180,22 +180,8 @@
         a[i].v = ZERO_VALUE;
     })
 
-    TILE(int, 1, _IM0_A, my);
-
-    LOOP_UNROLLING(int, xk_i, 0, 1, _IM0_A,
-    {
-        int x_s    = xi + xk_i * (DILATION_X);
-        int y_s    = yi + yk   * (DILATION_Y);
-        my[0].s[xk_i] = x_s + y_s * SRC_WIDTH;
-        my[0].s[xk_i] = my[0].s[xk_i] + bout * (int)(SRC_WIDTH * SRC_HEIGHT);
-        my[0].s[xk_i] = select(-1, my[0].s[xk_i], x_s >= 0);
-        my[0].s[xk_i] = select(-1, my[0].s[xk_i], x_s < SRC_WIDTH);
-        my[0].s[xk_i] = select(-1, my[0].s[xk_i], y_s >= 0);
-        my[0].s[xk_i] = select(-1, my[0].s[xk_i], y_s < SRC_HEIGHT);
-    })
-
-    // Load tile from the src tensor
-    T_LOAD2D_INDIRECT(SRC_DATA_TYPE, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, (cout / DEPTH_MULTIPLIER), src_stride_y, my, a);
+    // Load tile from the src tensor (TILE A)
+    T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, (cout / DEPTH_MULTIPLIER), src_w, src_h, DILATION_X, 1, _IBOUNDARY_CHECK, a);
 
     TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);
 
diff --git a/src/runtime/heuristics/dwc_native/ClDWCNativeDefaultConfigValhall.cpp b/src/runtime/heuristics/dwc_native/ClDWCNativeDefaultConfigValhall.cpp
index d0ade1b..49485c8 100644
--- a/src/runtime/heuristics/dwc_native/ClDWCNativeDefaultConfigValhall.cpp
+++ b/src/runtime/heuristics/dwc_native/ClDWCNativeDefaultConfigValhall.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -283,30 +283,15 @@
         desc.n0 = adjust_vec_size(desc.n0, kernel_c);
 
         // Set m0 only if stride_x == 1 and dilation_x == 1
-        // m0 affects the number of rows to load from the input tensor. In fact, when depth_multiplier == 1, the number of rows
-        // loaded from the input tensor is kernel_width + (M0 - 1)
-        // The bigger the kernel_width, the smaller the M0 should be to avoid register spilling.
         if(conv_info.stride().first == 1 && dilation.x() == 1)
         {
-            // When both the kernel width and kernel height are 1, the operation is a pointwise multiplication, so M0 can be 1
             if((kernel_w >= 9) || (kernel_w == 1))
             {
                 desc.m0 = 1;
             }
             else
             {
-                switch(kernel_w)
-                {
-                    case 3:
-                        desc.m0 = 4;
-                        break;
-                    case 5:
-                        desc.m0 = 3;
-                        break;
-                    default:
-                        desc.m0 = 2;
-                        break;
-                }
+                desc.m0 = 2;
             }
         }
         else