COMPMID-1316 Using 8 bit dot product instruction in CLDepthWiseConvolution with QASYMM8
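
The 3x3 quantized depthwise convolution kernels now use the
cl_arm_integer_dot_product_int8 OpenCL extension and, when available, its
accumulate variant cl_arm_integer_dot_product_accumulate_int8. A rough
sketch of the intrinsics wrapped by the new ARM_DOT macro (semantics per
the extension, assuming a device that exposes it):

    // plain dot product: acc += x0*y0 + x1*y1 + x2*y2 + x3*y3
    acc += arm_dot((uchar4)(x0, x1, x2, x3), (uchar4)(y0, y1, y2, y3));
    // accumulate variant: the same sum folded into a single instruction
    acc = arm_dot_acc((uchar4)(x0, x1, x2, x3), (uchar4)(y0, y1, y2, y3), acc);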

Change-Id: I3fc37bdceaae8b4b1effa51129b71bf352388564
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138374
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 0e83ff2..de78786 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -117,6 +117,11 @@
     return device_supports_extension(device, "cl_arm_integer_dot_product_int8");
 }
 
+bool dot8_acc_supported(const cl::Device &device)
+{
+    return device_supports_extension(device, "cl_arm_integer_dot_product_accumulate_int8");
+}
+
 CLVersion get_cl_version(const cl::Device &device)
 {
     std::string version_str = device.getInfo<CL_DEVICE_VERSION>();
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 4753524..64519ff 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -200,8 +200,10 @@
     { "depthwise_convolution_3x3_nhwc", "depthwise_convolution.cl" },
     { "depthwise_convolution_3x3_nhwc_stride1", "depthwise_convolution.cl" },
     { "depthwise_convolution_3x3_quantized_nchw", "depthwise_convolution_quantized.cl" },
+    { "depthwise_convolution_3x3_quantized_nhwc", "depthwise_convolution_quantized.cl" },
     { "depthwise_convolution_3x3_quantized_nhwc_stride1", "depthwise_convolution_quantized.cl" },
-    { "depthwise_convolution_3x3_quantized_nhwc_stride2", "depthwise_convolution_quantized.cl" },
+    { "depthwise_convolution_3x3_quantized_dot8_nchw", "depthwise_convolution_quantized.cl" },
+    { "depthwise_convolution_3x3_quantized_dot8_nhwc_stride1", "depthwise_convolution_quantized.cl" },
     { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16", "depthwise_convolution.cl" },
     { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16", "depthwise_convolution.cl" },
     { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32", "depthwise_convolution.cl" },
@@ -810,6 +812,11 @@
         concat_str += " -DARM_COMPUTE_OPENCL_DOT8_ENABLED=1 ";
     }
 
+    if(dot8_acc_supported(_device))
+    {
+        concat_str += " -DARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED=1 ";
+    }
+
     if(get_cl_version(_device) == CLVersion::CL20)
     {
         concat_str += " -cl-std=CL2.0 ";
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
index 88e009d..ca8efcd 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -37,12 +37,22 @@
 #define ACTIVATION_FUNC(x) (x)
 #endif /* defined(FUSED_ACTIVATION) */
 
-#if defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X)
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED)
+#define ARM_DOT(x0, x1, x2, x3, y0, y1, y2, y3, val) val = arm_dot_acc((uchar4)(x0, x1, x2, x3), (uchar4)(y0, y1, y2, y3), val);
+#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED)
+#define ARM_DOT(x0, x1, x2, x3, y0, y1, y2, y3, val) val += arm_dot((uchar4)(x0, x1, x2, x3), (uchar4)(y0, y1, y2, y3));
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED)
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
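+
+// For example, ARM_DOT(x0, x1, x2, x3, y0, y1, y2, y3, val) expands to
+//   val = arm_dot_acc((uchar4)(x0, x1, x2, x3), (uchar4)(y0, y1, y2, y3), val);
+// when the accumulate extension is available, and to
+//   val += arm_dot((uchar4)(x0, x1, x2, x3), (uchar4)(y0, y1, y2, y3));
+// otherwise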
+
+#if defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER)
 
 #if CONV_STRIDE_X > 3
 #error "Stride X not supported"
 #endif /* CONV_STRIDE_X > 3 */
 
+#if !defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
+
 #if CONV_STRIDE_X == 1
 #define GET_VALUES(first_value, left, middle, right)                              \
     ({                                                                            \
@@ -250,29 +260,40 @@
 #endif /* CONV_STRIDE_Y == 1 */
 }
 
-#endif /* defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) */
+#else // !defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
 
-#if defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
-
-#define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE)
-
-#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
-#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE)
-#define VEC_USHORT VEC_DATA_TYPE(ushort, VEC_SIZE)
-
-#define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_USHORT) * CONVERT(y, VEC_USHORT), VEC_INT)
-
-#if WEIGHTS_OFFSET != 0
-#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) \
-    ({                                          \
-        sum += CONVERT(x, VEC_INT);             \
-        MULTIPLY_ADD(x, y, acc);                \
+#if CONV_STRIDE_X == 1
+#define GET_VALUES(first_value, left, middle, right)                 \
+    ({                                                               \
+        uchar8 temp0 = vload8(0, first_value);                       \
+        uchar2 temp1 = vload2(0, (first_value + 8 * sizeof(uchar))); \
+        \
+        left   = temp0.s01234567;                                    \
+        middle = (uchar8)(temp0.s1234, temp0.s567, temp1.s0);        \
+        right  = (uchar8)(temp0.s2345, temp0.s67, temp1.s01);        \
     })
-#else /* WEIGHTS_OFFSET != 0 */
-#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) MULTIPLY_ADD(x, y, acc)
-#endif /* WEIGHTS_OFFSET != 0 */
-
-/** This function computes the depthwise convolution quantized.
+#elif CONV_STRIDE_X == 2
+#define GET_VALUES(first_value, left, middle, right)         \
+    ({                                                       \
+        uchar16 temp0 = vload16(0, first_value);             \
+        uchar   temp1 = *(first_value + 16 * sizeof(uchar)); \
+        \
+        left   = temp0.s02468ace;                            \
+        middle = temp0.s13579bdf;                            \
+        right  = (uchar8)(temp0.s2468, temp0.sace, temp1);   \
+    })
+#else /* CONV_STRIDE_X */
+#define GET_VALUES(first_value, left, middle, right)                   \
+    ({                                                                 \
+        uchar16 temp0 = vload16(0, first_value);                       \
+        uchar8  temp1 = vload8(0, (first_value + 16 * sizeof(uchar))); \
+        \
+        left   = (uchar8)(temp0.s0369, temp0.scf, temp1.s25);          \
+        middle = (uchar8)(temp0.s147a, temp0.sd, temp1.s036);          \
+        right  = (uchar8)(temp0.s258b, temp0.se, temp1.s147);          \
+    })
+#endif /* CONV_STRIDE_X */
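+
+// Each GET_VALUES variant reads one horizontal input line and de-interleaves it
+// into the three taps of the 3x3 window: 10 bytes for stride 1, 17 bytes for
+// stride 2 and 24 bytes for stride 3, always producing 8 values per tap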
+/** This function computes the depthwise convolution quantized for NCHW data layout using the dot product instruction.
  *
  * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
  * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
@@ -304,154 +325,568 @@
  * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
  */
 
+__kernel void depthwise_convolution_3x3_quantized_dot8_nchw(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights)
+#if defined(HAS_BIAS)
+    ,
+    VECTOR_DECLARATION(biases)
+#endif //defined(HAS_BIAS)
+)
+{
+    Image    src     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src);
+    Image    dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
+    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT(weights);
+#if defined(HAS_BIAS)
+    Vector   biases  = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+
+    const int bias_value = *((__global int *)(vector_offset(&biases, get_global_id(2))));
+#endif //defined(HAS_BIAS)
+
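+    // Each input plane produces DEPTH_MULTIPLIER output planes, so rewind the
+    // source pointer to the input plane this output channel is computed from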
+    src.ptr -= (get_global_id(2) - get_global_id(2) / DEPTH_MULTIPLIER) * src_step_z;
+
+    uchar3 w0 = vload3(0, weights.ptr + 0 * weights_stride_y);
+    uchar3 w1 = vload3(0, weights.ptr + 1 * weights_stride_y);
+    uchar3 w2 = vload3(0, weights.ptr + 2 * weights_stride_y);
+
+    uchar8 left0, middle0, right0;
+    uchar8 left1, middle1, right1;
+    uchar8 left2, middle2, right2;
+
+    int8 values0 = 0;
+    int8 sum0    = 0;
+
+    GET_VALUES(src.ptr + 0 * src_stride_y, left0, middle0, right0);
+    GET_VALUES(src.ptr + 1 * src_stride_y, left1, middle1, right1);
+    GET_VALUES(src.ptr + 2 * src_stride_y, left2, middle2, right2);
+
+#if WEIGHTS_OFFSET != 0
+    sum0 += convert_int8(left0) + convert_int8(middle0) + convert_int8(right0);
+    sum0 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1);
+    sum0 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2);
+#endif /* WEIGHTS_OFFSET != 0 */
+
+#if CONV_STRIDE_Y == 1
+    // If conv_stride_y is equal to 1, we compute two output rows
+
+    uchar8 left3, middle3, right3;
+    int8   values1 = 0;
+    int8   sum1    = 0;
+
+    GET_VALUES(src.ptr + 3 * src_stride_y, left3, middle3, right3);
+
+#if WEIGHTS_OFFSET != 0
+    sum1 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1);
+    sum1 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2);
+    sum1 += convert_int8(left3) + convert_int8(middle3) + convert_int8(right3);
+#endif /* WEIGHTS_OFFSET != 0 */
+#endif // CONV_STRIDE_Y == 1
+
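+    // The nine multiply-adds of each 3x3 window are split into two 4-wide dot
+    // products (taps w0.s0..w1.s0, then w1.s1..w2.s1) plus one scalar
+    // multiply-add for the last tap w2.s2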
+    ARM_DOT(left0.s0, middle0.s0, right0.s0, left1.s0, w0.s0, w0.s1, w0.s2, w1.s0, values0.s0);
+    ARM_DOT(middle1.s0, right1.s0, left2.s0, middle2.s0, w1.s1, w1.s2, w2.s0, w2.s1, values0.s0);
+    values0.s0 += right2.s0 * w2.s2;
+
+    ARM_DOT(left0.s1, middle0.s1, right0.s1, left1.s1, w0.s0, w0.s1, w0.s2, w1.s0, values0.s1);
+    ARM_DOT(middle1.s1, right1.s1, left2.s1, middle2.s1, w1.s1, w1.s2, w2.s0, w2.s1, values0.s1);
+    values0.s1 += right2.s1 * w2.s2;
+
+    ARM_DOT(left0.s2, middle0.s2, right0.s2, left1.s2, w0.s0, w0.s1, w0.s2, w1.s0, values0.s2);
+    ARM_DOT(middle1.s2, right1.s2, left2.s2, middle2.s2, w1.s1, w1.s2, w2.s0, w2.s1, values0.s2);
+    values0.s2 += right2.s2 * w2.s2;
+
+    ARM_DOT(left0.s3, middle0.s3, right0.s3, left1.s3, w0.s0, w0.s1, w0.s2, w1.s0, values0.s3);
+    ARM_DOT(middle1.s3, right1.s3, left2.s3, middle2.s3, w1.s1, w1.s2, w2.s0, w2.s1, values0.s3);
+    values0.s3 += right2.s3 * w2.s2;
+
+    ARM_DOT(left0.s4, middle0.s4, right0.s4, left1.s4, w0.s0, w0.s1, w0.s2, w1.s0, values0.s4);
+    ARM_DOT(middle1.s4, right1.s4, left2.s4, middle2.s4, w1.s1, w1.s2, w2.s0, w2.s1, values0.s4);
+    values0.s4 += right2.s4 * w2.s2;
+
+    ARM_DOT(left0.s5, middle0.s5, right0.s5, left1.s5, w0.s0, w0.s1, w0.s2, w1.s0, values0.s5);
+    ARM_DOT(middle1.s5, right1.s5, left2.s5, middle2.s5, w1.s1, w1.s2, w2.s0, w2.s1, values0.s5);
+    values0.s5 += right2.s5 * w2.s2;
+
+    ARM_DOT(left0.s6, middle0.s6, right0.s6, left1.s6, w0.s0, w0.s1, w0.s2, w1.s0, values0.s6);
+    ARM_DOT(middle1.s6, right1.s6, left2.s6, middle2.s6, w1.s1, w1.s2, w2.s0, w2.s1, values0.s6);
+    values0.s6 += right2.s6 * w2.s2;
+
+    ARM_DOT(left0.s7, middle0.s7, right0.s7, left1.s7, w0.s0, w0.s1, w0.s2, w1.s0, values0.s7);
+    ARM_DOT(middle1.s7, right1.s7, left2.s7, middle2.s7, w1.s1, w1.s2, w2.s0, w2.s1, values0.s7);
+    values0.s7 += right2.s7 * w2.s2;
+
+#if CONV_STRIDE_Y == 1
+    ARM_DOT(left1.s0, middle1.s0, right1.s0, left2.s0, w0.s0, w0.s1, w0.s2, w1.s0, values1.s0);
+    ARM_DOT(middle2.s0, right2.s0, left3.s0, middle3.s0, w1.s1, w1.s2, w2.s0, w2.s1, values1.s0);
+    values1.s0 += right3.s0 * w2.s2;
+
+    ARM_DOT(left1.s1, middle1.s1, right1.s1, left2.s1, w0.s0, w0.s1, w0.s2, w1.s0, values1.s1);
+    ARM_DOT(middle2.s1, right2.s1, left3.s1, middle3.s1, w1.s1, w1.s2, w2.s0, w2.s1, values1.s1);
+    values1.s1 += right3.s1 * w2.s2;
+
+    ARM_DOT(left1.s2, middle1.s2, right1.s2, left2.s2, w0.s0, w0.s1, w0.s2, w1.s0, values1.s2);
+    ARM_DOT(middle2.s2, right2.s2, left3.s2, middle3.s2, w1.s1, w1.s2, w2.s0, w2.s1, values1.s2);
+    values1.s2 += right3.s2 * w2.s2;
+
+    ARM_DOT(left1.s3, middle1.s3, right1.s3, left2.s3, w0.s0, w0.s1, w0.s2, w1.s0, values1.s3);
+    ARM_DOT(middle2.s3, right2.s3, left3.s3, middle3.s3, w1.s1, w1.s2, w2.s0, w2.s1, values1.s3);
+    values1.s3 += right3.s3 * w2.s2;
+
+    ARM_DOT(left1.s4, middle1.s4, right1.s4, left2.s4, w0.s0, w0.s1, w0.s2, w1.s0, values1.s4);
+    ARM_DOT(middle2.s4, right2.s4, left3.s4, middle3.s4, w1.s1, w1.s2, w2.s0, w2.s1, values1.s4);
+    values1.s4 += right3.s4 * w2.s2;
+
+    ARM_DOT(left1.s5, middle1.s5, right1.s5, left2.s5, w0.s0, w0.s1, w0.s2, w1.s0, values1.s5);
+    ARM_DOT(middle2.s5, right2.s5, left3.s5, middle3.s5, w1.s1, w1.s2, w2.s0, w2.s1, values1.s5);
+    values1.s5 += right3.s5 * w2.s2;
+
+    ARM_DOT(left1.s6, middle1.s6, right1.s6, left2.s6, w0.s0, w0.s1, w0.s2, w1.s0, values1.s6);
+    ARM_DOT(middle2.s6, right2.s6, left3.s6, middle3.s6, w1.s1, w1.s2, w2.s0, w2.s1, values1.s6);
+    values1.s6 += right3.s6 * w2.s2;
+
+    ARM_DOT(left1.s7, middle1.s7, right1.s7, left2.s7, w0.s0, w0.s1, w0.s2, w1.s0, values1.s7);
+    ARM_DOT(middle2.s7, right2.s7, left3.s7, middle3.s7, w1.s1, w1.s2, w2.s0, w2.s1, values1.s7);
+    values1.s7 += right3.s7 * w2.s2;
+#endif // CONV_STRIDE_Y == 1
+
+#if defined(HAS_BIAS)
+    values0 += (int8)(bias_value);
+#if CONV_STRIDE_Y == 1
+    values1 += (int8)(bias_value);
+#endif /* CONV_STRIDE_Y == 1 */
+#endif //defined(HAS_BIAS)
+
+#if WEIGHTS_OFFSET != 0
+    values0 += sum0 * (int8)(WEIGHTS_OFFSET);
+#if CONV_STRIDE_Y == 1
+    values1 += sum1 * (int8)(WEIGHTS_OFFSET);
+#endif /* CONV_STRIDE_Y == 1 */
+#endif /* WEIGHTS_OFFSET != 0 */
+
+#if INPUT_OFFSET != 0
+    ushort  sum_weights = 0;
+    ushort3 tmp_we      = convert_ushort3(w0) + convert_ushort3(w1) + convert_ushort3(w2);
+    sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2;
+    values0 += sum_weights * (int8)(INPUT_OFFSET);
+#if CONV_STRIDE_Y == 1
+    values1 += sum_weights * (int8)(INPUT_OFFSET);
+#endif /* CONV_STRIDE_Y == 1 */
+#endif /* INPUT_OFFSET != 0 */
+
+#if K_OFFSET != 0
+    values0 += (int8)(K_OFFSET);
+#if CONV_STRIDE_Y == 1
+    values1 += (int8)(K_OFFSET);
+#endif /* CONV_STRIDE_Y == 1 */
+#endif /* K_OFFSET != 0 */
+
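+    // Requantize: fixed-point multiply by OUTPUT_MULTIPLIER with a rounding shift
+    // right by OUTPUT_SHIFT, then add the output zero point and clamp to [0, 255]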
+    values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
+    values0 += (int8)OUTPUT_OFFSET;
+    uchar8 res0 = convert_uchar8_sat(values0);
+    res0        = max(res0, (uchar8)0);
+    res0        = min(res0, (uchar8)255);
+
+    vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr);
+#if CONV_STRIDE_Y == 1
+
+    values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
+    values1 += (int8)OUTPUT_OFFSET;
+    uchar8 res1 = convert_uchar8_sat(values1);
+    res1        = max(res1, (uchar8)0);
+    res1        = min(res1, (uchar8)255);
+
+    vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y);
+#endif /* CONV_STRIDE_Y == 1 */
+}
+
+#endif // ARM_COMPUTE_OPENCL_DOT8_ENABLED
+
+#endif /* defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) */
+
+#if defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
+
+#define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE)
+
+#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
+#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE)
+#define VEC_USHORT VEC_DATA_TYPE(ushort, VEC_SIZE)
+
+#define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_USHORT) * CONVERT(y, VEC_USHORT), VEC_INT)
+
+#if WEIGHTS_OFFSET != 0
+#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) \
+    ({                                          \
+        sum += CONVERT(x, VEC_INT);             \
+        MULTIPLY_ADD(x, y, acc);                \
+    })
+#else /* WEIGHTS_OFFSET != 0 */
+#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) MULTIPLY_ADD(x, y, acc)
+#endif /* WEIGHTS_OFFSET != 0 */
+
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
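+// Note: this dot product path processes four channel lanes (.s0 to .s3) per
+// call, i.e. it assumes VEC_SIZE == 4; each lane is computed as two 4-wide dot
+// products over the nine taps plus one scalar multiply-add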
+#define DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8) \
+    ({                                                                                                             \
+        ARM_DOT(val0.s0, val1.s0, val2.s0, val3.s0, w0.s0, w1.s0, w2.s0, w3.s0, acc.s0);                           \
+        ARM_DOT(val4.s0, val5.s0, val6.s0, val7.s0, w4.s0, w5.s0, w6.s0, w7.s0, acc.s0);                           \
+        acc.s0 += val8.s0 * w8.s0;                                                                                 \
+        \
+        ARM_DOT(val0.s1, val1.s1, val2.s1, val3.s1, w0.s1, w1.s1, w2.s1, w3.s1, acc.s1);                           \
+        ARM_DOT(val4.s1, val5.s1, val6.s1, val7.s1, w4.s1, w5.s1, w6.s1, w7.s1, acc.s1);                           \
+        acc.s1 += val8.s1 * w8.s1;                                                                                 \
+        \
+        ARM_DOT(val0.s2, val1.s2, val2.s2, val3.s2, w0.s2, w1.s2, w2.s2, w3.s2, acc.s2);                           \
+        ARM_DOT(val4.s2, val5.s2, val6.s2, val7.s2, w4.s2, w5.s2, w6.s2, w7.s2, acc.s2);                           \
+        acc.s2 += val8.s2 * w8.s2;                                                                                 \
+        \
+        ARM_DOT(val0.s3, val1.s3, val2.s3, val3.s3, w0.s3, w1.s3, w2.s3, w3.s3, acc.s3);                           \
+        ARM_DOT(val4.s3, val5.s3, val6.s3, val7.s3, w4.s3, w5.s3, w6.s3, w7.s3, acc.s3);                           \
+        acc.s3 += val8.s3 * w8.s3;                                                                                 \
+    })
+
+#if WEIGHTS_OFFSET != 0
+#define DOT_PRODUCT_ACCUMULATE(acc, sum, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8)                                                                                                             \
+    ({                                                                                                                                                                                                                                         \
+        sum += CONVERT(val0, VEC_INT) + CONVERT(val1, VEC_INT) + CONVERT(val2, VEC_INT) + CONVERT(val3, VEC_INT) + CONVERT(val4, VEC_INT) + CONVERT(val5, VEC_INT) + CONVERT(val6, VEC_INT) + CONVERT(val7, VEC_INT) + CONVERT(val8, VEC_INT); \
+        DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8);                                                                                                                            \
+    })
+#else /* WEIGHTS_OFFSET != 0 */
+#define DOT_PRODUCT_ACCUMULATE(acc, sum, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8) DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8)
+#endif /* WEIGHTS_OFFSET != 0 */
+
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
+
+#if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
+/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width or height is not 1.
+ *
+ * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
+ * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
+ * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
+ * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
+ * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
+ * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
+ *
+ * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
+ * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x                            src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y                          Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y                            src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source image
+ * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
+ * @param[in] weights_ptr                           Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x                        weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y                        weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr                            (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr
+ * @param[in] biases_stride_x                       (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
+ * @param[in] max_offset                            Max offset for the input tensor
+ */
+__kernel void depthwise_convolution_3x3_quantized_nhwc(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#if defined(HAS_BIAS)
+    VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+    int max_offset)
+{
+    const int x = get_global_id(0); // channels
+    const int y = get_global_id(1); // spatial coordinate x
+    const int z = get_global_id(2); // spatial coordinate y
+
+    Vector weights = CONVERT_TO_VECTOR_STRUCT(weights);
+
+    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE;
+
+    int        z_coord  = 0;
+    int4       offset   = 0;
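+    // Byte offsets along the width of four consecutive input columns; this
+    // kernel consumes only the first three (offset.s0..s2) for its 3x3 window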
+    const int4 y_offset = ((int4)(y * CONV_STRIDE_X) + (int4)(0, 1, 2, 3) - CONV_PAD_LEFT) * (int4)src_stride_y;
+
+    // We compute VEC_SIZEx1x1 [C,W,H] elements
+    VEC_INT acc = 0, sum = 0;
+
+    // Load weights
+    VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 2 * weights_stride_z);
+    VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 2 * weights_stride_z);
+    VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 2 * weights_stride_z);
+
+#if INPUT_OFFSET != 0
+    VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT)
+                     + CONVERT(w5, VEC_INT) + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT);
+#endif /* INPUT_OFFSET != 0 */
+
+    // Load input values
+    // z == 0
+    // Clamp z_coord: for z = 0 it can be negative
+    // z_coord is cast to unsigned int so that a single min() clamps both bounds
+    // (a "-1" 32-bit signed value converted to unsigned gives 4294967295)
+    z_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP;
+    z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
+    offset  = y_offset + (int4)(z_coord * src_stride_z);
+    offset  = min(offset, max_offset);
+
+    VEC_UCHAR values0 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values1 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values2 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+
+    // z == 1
+    // z_coord can only be negative for z = 0, so there is no need to clamp it
+    // Moreover z_coord cannot be out of bounds for z = 1, so there is no need to clamp the offset
+    z_coord           = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP + 1;
+    offset            = y_offset + (int4)(z_coord * src_stride_z);
+    VEC_UCHAR values3 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values4 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values5 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+
+    // z == 2
+    // After z = 1 we can simply add src_stride_z to offset without updating z_coord
+    // However, offset can go out of bounds, so clamp it against max_offset
+    offset += (int4)src_stride_z;
+    offset            = min(offset, max_offset);
+    VEC_UCHAR values6 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values7 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values8 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+
+    MULTIPLY_ADD_ACCUMULATE(values0, w0, acc, sum);
+    MULTIPLY_ADD_ACCUMULATE(values1, w1, acc, sum);
+    MULTIPLY_ADD_ACCUMULATE(values2, w2, acc, sum);
+
+    MULTIPLY_ADD_ACCUMULATE(values3, w3, acc, sum);
+    MULTIPLY_ADD_ACCUMULATE(values4, w4, acc, sum);
+    MULTIPLY_ADD_ACCUMULATE(values5, w5, acc, sum);
+
+    MULTIPLY_ADD_ACCUMULATE(values6, w6, acc, sum);
+    MULTIPLY_ADD_ACCUMULATE(values7, w7, acc, sum);
+    MULTIPLY_ADD_ACCUMULATE(values8, w8, acc, sum);
+
+#if defined(HAS_BIAS)
+    Vector  biases      = CONVERT_TO_VECTOR_STRUCT(biases);
+    VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
+    acc += bias_values;
+#endif // defined(HAS_BIAS)
+
+#if WEIGHTS_OFFSET != 0
+    acc += WEIGHTS_OFFSET * sum;
+#endif /* WEIGHTS_OFFSET != 0 */
+
+#if INPUT_OFFSET != 0
+    acc += INPUT_OFFSET * sum_we;
+#endif /* INPUT_OFFSET != 0 */
+
+#if K_OFFSET != 0
+    acc += (VEC_INT)K_OFFSET;
+#endif /* K_OFFSET != 0 */
+
+    acc = asymm_mult_by_quant_multiplier_less_than_one(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
+    acc += (VEC_INT)OUTPUT_OFFSET;
+
+    VEC_UCHAR res = CONVERT_SAT(acc, VEC_UCHAR);
+    res           = CLAMP(res, (VEC_UCHAR)0, (VEC_UCHAR)255);
+
+    Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
+    VSTORE(VEC_SIZE)
+    (res, 0, dst.ptr);
+}
+#endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
+
+#if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
+/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1
+ *
+ * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
+ * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
+ * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2)
+ * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2)
+ * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
+ * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1).
+ *
+ * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
+ * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x                            src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y                          Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y                            src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source image
+ * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
+ * @param[in] weights_ptr                           Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x                        weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y                        weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr                            (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr
+ * @param[in] biases_stride_x                       (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
+ * @param[in] max_offset                            Max offset for the input tensor
+ */
+
 __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1(
     TENSOR3D_DECLARATION(src),
     TENSOR3D_DECLARATION(dst),
     TENSOR3D_DECLARATION(weights),
 #if defined(HAS_BIAS)
-    VECTOR_DECLARATION(biases)
+    VECTOR_DECLARATION(biases),
 #endif /* defined(HAS_BIAS) */
-)
+    int max_offset)
 {
     int x = get_global_id(0);
     int y = get_global_id(1);
     int z = get_global_id(2);
 
-    Image  dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
     Vector weights = CONVERT_TO_VECTOR_STRUCT(weights);
+
+    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE;
+
+    int  z_coord  = 0;
+    int4 offset   = 0;
+    int4 y_offset = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3) - CONV_PAD_LEFT) * (int4)src_stride_y;
+
+    // We compute VEC_SIZEx2x2 [C,W,H] elements
+    VEC_INT acc0 = 0, sum0 = 0;
+    VEC_INT acc1 = 0, sum1 = 0;
+    VEC_INT acc2 = 0, sum2 = 0;
+    VEC_INT acc3 = 0, sum3 = 0;
+
+    // Load weights
+    VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 2 * weights_stride_z);
+    VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 2 * weights_stride_z);
+    VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 2 * weights_stride_z);
+
+#if INPUT_OFFSET != 0
+    VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT)
+                     + CONVERT(w5, VEC_INT) + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT);
+#endif /* INPUT_OFFSET != 0 */
+
+    // Load input values
+    // z == 0
+    // Clamp z_coord: for z = 0 it can be negative
+    // z_coord is cast to unsigned int so that a single min() clamps both bounds
+    // (a "-1" 32-bit signed value converted to unsigned gives 4294967295)
+    z_coord = z * NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP;
+    z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
+    offset  = y_offset + (int4)(z_coord * src_stride_z);
+    offset  = min(offset, max_offset);
+
+    VEC_UCHAR values0 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values1 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values2 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values3 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
+    // z == 1
+    // z_coord can only be negative for z = 0, so there is no need to clamp it
+    // Moreover z_coord cannot be out of bounds for z = 1, so there is no need to clamp the offset
+    z_coord           = z * NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 1;
+    offset            = y_offset + (int4)(z_coord * src_stride_z);
+    VEC_UCHAR values4 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values5 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values6 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values7 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
+    // z == 2
+    // After z = 1 we can simply add src_stride_z to offset without updating z_coord
+    // However, offset can go out of bounds, so clamp it against max_offset
+    offset += (int4)src_stride_z;
+    offset             = min(offset, max_offset);
+    VEC_UCHAR values8  = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values9  = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values10 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values11 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
+    // z == 3
+    // After z = 2 we again simply add src_stride_z to offset without updating z_coord
+    // However, offset can go out of bounds, so clamp it against max_offset
+    offset += (int4)(src_stride_z);
+    offset             = min(offset, max_offset);
+    VEC_UCHAR values12 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values13 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values14 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values15 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
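+    // acc0/acc1 accumulate the two output columns of the first output plane,
+    // acc2/acc3 the same two columns of the second one; the two middle input
+    // planes loaded above are reused between them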
+    MULTIPLY_ADD_ACCUMULATE(values0, w0, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values1, w1, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values2, w2, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values1, w0, acc1, sum1);
+    MULTIPLY_ADD_ACCUMULATE(values2, w1, acc1, sum1);
+    MULTIPLY_ADD_ACCUMULATE(values3, w2, acc1, sum1);
+
+    MULTIPLY_ADD_ACCUMULATE(values4, w3, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values5, w4, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values6, w5, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values5, w3, acc1, sum1);
+    MULTIPLY_ADD_ACCUMULATE(values6, w4, acc1, sum1);
+    MULTIPLY_ADD_ACCUMULATE(values7, w5, acc1, sum1);
+
+    MULTIPLY_ADD_ACCUMULATE(values8, w6, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values9, w7, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values10, w8, acc0, sum0);
+    MULTIPLY_ADD_ACCUMULATE(values9, w6, acc1, sum1);
+    MULTIPLY_ADD_ACCUMULATE(values10, w7, acc1, sum1);
+    MULTIPLY_ADD_ACCUMULATE(values11, w8, acc1, sum1);
+
+    MULTIPLY_ADD_ACCUMULATE(values4, w0, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values5, w1, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values6, w2, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values5, w0, acc3, sum3);
+    MULTIPLY_ADD_ACCUMULATE(values6, w1, acc3, sum3);
+    MULTIPLY_ADD_ACCUMULATE(values7, w2, acc3, sum3);
+
+    MULTIPLY_ADD_ACCUMULATE(values8, w3, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values9, w4, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values10, w5, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values9, w3, acc3, sum3);
+    MULTIPLY_ADD_ACCUMULATE(values10, w4, acc3, sum3);
+    MULTIPLY_ADD_ACCUMULATE(values11, w5, acc3, sum3);
+
+    MULTIPLY_ADD_ACCUMULATE(values12, w6, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values13, w7, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values14, w8, acc2, sum2);
+    MULTIPLY_ADD_ACCUMULATE(values13, w6, acc3, sum3);
+    MULTIPLY_ADD_ACCUMULATE(values14, w7, acc3, sum3);
+    MULTIPLY_ADD_ACCUMULATE(values15, w8, acc3, sum3);
+
 #if defined(HAS_BIAS)
     Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
 
     VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
-#endif /* defined(HAS_BIAS) */
 
-    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * src_step_x;
-    int8            y_coord  = (int8)(y * (src_step_y / src_stride_y)) + (int8)(0, 1, 2, 3, 4, 5, 0, 0) - CONV_PAD_LEFT;
-    int             z_coord  = z * (src_step_z / src_stride_z) - CONV_PAD_TOP;
-
-    VEC_INT sum_we = 0;
-    VEC_INT acc0 = 0, acc1 = 0, acc2 = 0, acc3 = 0;
-    VEC_INT sum0 = 0, sum1 = 0, sum2 = 0, sum3 = 0;
-
-    // z == 0
-    VEC_UCHAR w0, w1, w2;
-    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
-    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
-    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);
-
-#if INPUT_OFFSET != 0
-    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
-    int  valid_z = z_coord;
-    int8 valid_y = select(y_coord, -1, (int8)valid_z < 0);                 // If z < 0, set y to -1
-    valid_y      = select(valid_y, SRC_DIM_1, (int8)valid_z >= SRC_DIM_2); // If z >= SRC_DIM_2, set y to SRC_DIM_2
-    valid_z      = clamp(valid_z, 0, SRC_DIM_2 - 1);                       // Clamp z coordinate
-
-    VEC_UCHAR values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s0 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s1 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc1, sum1);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s2 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc1, sum1);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s3 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc1, sum1);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc2, sum2);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc3, sum3);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s4 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc2, sum2);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc3, sum3);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s5 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc3, sum3);
-
-    weights.ptr += weights_stride_z;
-
-    // z == 1
-    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
-    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
-    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);
-
-#if INPUT_OFFSET != 0
-    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
-    // Only unit pad_top/bottom allowed, this can never be out of bound
-    valid_z = z_coord + 1;
-    valid_y = y_coord;
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s0 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s1 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc1, sum1);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s2 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc1, sum1);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s3 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc1, sum1);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc2, sum2);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc3, sum3);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s4 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc2, sum2);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc3, sum3);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s5 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc3, sum3);
-
-    weights.ptr += weights_stride_z;
-
-    // z == 2
-    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
-    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
-    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);
-
-#if INPUT_OFFSET != 0
-    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
-    valid_z = z_coord + 2;
-    valid_y = select(y_coord, -1, (int8)valid_z < 0);
-    valid_y = select(valid_y, SRC_DIM_1, (int8)valid_z >= SRC_DIM_2);
-    valid_z = clamp(valid_z, 0, SRC_DIM_2 - 1);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s0 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s1 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc1, sum1);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s2 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc1, sum1);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s3 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc1, sum1);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc2, sum2);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc3, sum3);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s4 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc2, sum2);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc3, sum3);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s5 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc3, sum3);
-
-#if defined(HAS_BIAS)
     acc0 += bias_values;
     acc1 += bias_values;
     acc2 += bias_values;
@@ -501,17 +936,33 @@
     res2 = CLAMP(res2, (VEC_UCHAR)0, (VEC_UCHAR)255);
     res3 = CLAMP(res3, (VEC_UCHAR)0, (VEC_UCHAR)255);
 
+    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z;
+
     VSTORE(VEC_SIZE)
-    (res0, 0, dst.ptr + 0 * dst_stride_y);
+    (res0, 0, dst_addr + 0 * dst_stride_y);
     VSTORE(VEC_SIZE)
-    (res1, 0, dst.ptr + 1 * dst_stride_y);
-    VSTORE(VEC_SIZE)
-    (res2, 0, dst.ptr + 2 * dst_stride_y);
-    VSTORE(VEC_SIZE)
-    (res3, 0, dst.ptr + 3 * dst_stride_y);
+    (res1, 0, dst_addr + 1 * dst_stride_y);
+
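+    // When DST_DIM_2 is not a multiple of NUM_PLANES_PROCESSED, guard the write
+    // of the second output plane so the last work-items do not write out of bounds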
+#if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
+    if((z * NUM_PLANES_PROCESSED + 1) < DST_DIM_2)
+#endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
+    {
+        VSTORE(VEC_SIZE)
+        (res2, 0, dst_addr + 0 * dst_stride_y + 1 * dst_stride_z);
+        VSTORE(VEC_SIZE)
+        (res3, 0, dst_addr + 1 * dst_stride_y + 1 * dst_stride_z);
+    }
 }
 
-/** This function computes the depthwise convolution quantized.
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
+/** This function computes the depthwise convolution quantized for NHWC data layout, when the stride along the width and height is 1, using the dot product instruction
+ *
+ * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
+ * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
+ * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2)
+ * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2)
+ * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
+ * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1).
  *
  * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
  * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
@@ -543,166 +994,175 @@
  * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
  */
 
-__kernel void depthwise_convolution_3x3_quantized_nhwc_stride2(
+__kernel void depthwise_convolution_3x3_quantized_dot8_nhwc_stride1(
     TENSOR3D_DECLARATION(src),
     TENSOR3D_DECLARATION(dst),
     TENSOR3D_DECLARATION(weights),
 #if defined(HAS_BIAS)
-    VECTOR_DECLARATION(biases)
+    VECTOR_DECLARATION(biases),
 #endif /* defined(HAS_BIAS) */
-)
+    int max_offset)
 {
     int x = get_global_id(0);
     int y = get_global_id(1);
     int z = get_global_id(2);
 
-    Image  dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
     Vector weights = CONVERT_TO_VECTOR_STRUCT(weights);
+
+    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE;
+
+    int  z_coord  = 0;
+    int4 offset   = 0;
+    int4 y_offset = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3) - CONV_PAD_LEFT) * (int4)src_stride_y;
+
+    // We compute VEC_SIZEx2x2 [C,W,H] elements
+    VEC_INT acc0 = 0, sum0 = 0;
+    VEC_INT acc1 = 0, sum1 = 0;
+    VEC_INT acc2 = 0, sum2 = 0;
+    VEC_INT acc3 = 0, sum3 = 0;
+
+    // Load weights
+    VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 0 * weights_stride_z);
+    VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 1 * weights_stride_z);
+    VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 2 * weights_stride_z);
+    VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 2 * weights_stride_z);
+    VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 2 * weights_stride_z);
+
+#if INPUT_OFFSET != 0
+    VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT)
+                     + CONVERT(w5, VEC_INT) + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT);
+#endif /* INPUT_OFFSET != 0 */
+
+    // Load input values
+    // z == 0
+    // Clamp z_coord: for z = 0 it can be negative
+    // z_coord is cast to unsigned int so that a single min() clamps both bounds
+    // (a "-1" 32-bit signed value converted to unsigned gives 4294967295)
+    z_coord = z * NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP;
+    z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
+    offset  = y_offset + (int4)(z_coord * src_stride_z);
+    offset  = min(offset, max_offset);
+
+    VEC_UCHAR values0 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values1 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values2 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values3 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
+    // z == 1
+    // z_coord can only be negative for z = 0, so there is no need to clamp it
+    // Moreover z_coord cannot be out of bounds for z = 1, so there is no need to clamp the offset
+    z_coord           = z * NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 1;
+    offset            = y_offset + (int4)(z_coord * src_stride_z);
+    VEC_UCHAR values4 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values5 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values6 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values7 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
+    // z == 2
+    // After z = 1 we can simply add src_stride_z to offset without updating z_coord
+    // However, offset can go out of bounds, so clamp it against max_offset
+    offset += (int4)src_stride_z;
+    offset             = min(offset, max_offset);
+    VEC_UCHAR values8  = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values9  = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values10 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values11 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
+    // z == 3
+    // After z = 2 we again simply add src_stride_z to offset without updating z_coord
+    // However, offset can go out of bounds, so clamp it against max_offset
+    offset += (int4)(src_stride_z);
+    offset             = min(offset, max_offset);
+    VEC_UCHAR values12 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0);
+    VEC_UCHAR values13 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1);
+    VEC_UCHAR values14 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2);
+    VEC_UCHAR values15 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3);
+
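+    // Same 2x2 output mapping as the kernel above: acc0/acc1 accumulate the two
+    // columns of the first output plane, acc2/acc3 those of the second one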
+    DOT_PRODUCT_ACCUMULATE(acc0, sum0, values0, values1, values2, values4, values5, values6, values8, values9, values10, w0, w1, w2, w3, w4, w5, w6, w7, w8);
+    DOT_PRODUCT_ACCUMULATE(acc1, sum1, values1, values2, values3, values5, values6, values7, values9, values10, values11, w0, w1, w2, w3, w4, w5, w6, w7, w8);
+    DOT_PRODUCT_ACCUMULATE(acc2, sum2, values4, values5, values6, values8, values9, values10, values12, values13, values14, w0, w1, w2, w3, w4, w5, w6, w7, w8);
+    DOT_PRODUCT_ACCUMULATE(acc3, sum3, values5, values6, values7, values9, values10, values11, values13, values14, values15, w0, w1, w2, w3, w4, w5, w6, w7, w8);
+
 #if defined(HAS_BIAS)
     Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
 
     VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
-#endif /* defined(HAS_BIAS) */
 
-    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * src_step_x;
-    int8            y_coord  = (int8)(y * (src_step_y / src_stride_y)) + (int8)(0, 1, 2, 3, 4, 5, 0, 0) - CONV_PAD_LEFT;
-    int             z_coord  = z * (src_step_z / src_stride_z) - CONV_PAD_TOP;
-
-    VEC_INT sum_we = 0;
-    VEC_INT acc0 = 0, acc2 = 0;
-    VEC_INT sum0 = 0, sum2 = 0;
-
-    // z == 0
-    VEC_UCHAR w0, w1, w2;
-    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
-    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
-    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);
-
-#if INPUT_OFFSET != 0
-    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
-    int  valid_z = z_coord;
-    int8 valid_y = select(y_coord, -1, (int8)valid_z < 0);                 // If z < 0, set y to -1
-    valid_y      = select(valid_y, SRC_DIM_1, (int8)valid_z >= SRC_DIM_2); // If z >= SRC_DIM_2, set y to SRC_DIM_2
-    valid_z      = clamp(valid_z, 0, SRC_DIM_2 - 1);                       // Clamp z coordinate
-
-    VEC_UCHAR values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s0 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s1 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s2 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s3 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s4 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc2, sum2);
-
-    weights.ptr += weights_stride_z;
-
-    // z == 1
-    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
-    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
-    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);
-
-#if INPUT_OFFSET != 0
-    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
-    // Only unit pad_top/bottom allowed, this can never be out of bound
-    valid_z = z_coord + 1;
-    valid_y = y_coord;
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s0 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s1 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s2 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s3 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s4 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc2, sum2);
-
-    weights.ptr += weights_stride_z;
-
-    // z == 2
-    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
-    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
-    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);
-
-#if INPUT_OFFSET != 0
-    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
-    valid_z = z_coord + 2;
-    valid_y = select(y_coord, -1, (int8)valid_z < 0);
-    valid_y = select(valid_y, SRC_DIM_1, (int8)valid_z >= SRC_DIM_2);
-    valid_z = clamp(valid_z, 0, SRC_DIM_2 - 1);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s0 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s1 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc0, sum0);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s2 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc0, sum0);
-    MULTIPLY_ADD_ACCUMULATE(values, w0, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s3 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w1, acc2, sum2);
-
-    values = VLOAD(VEC_SIZE)(0, src_addr + valid_y.s4 * (int)src_stride_y + valid_z * src_stride_z);
-    MULTIPLY_ADD_ACCUMULATE(values, w2, acc2, sum2);
-
-#if defined(HAS_BIAS)
     acc0 += bias_values;
+    acc1 += bias_values;
     acc2 += bias_values;
+    acc3 += bias_values;
 #endif /* defined(HAS_BIAS) */
 
 #if WEIGHTS_OFFSET != 0
     acc0 += WEIGHTS_OFFSET * sum0;
+    acc1 += WEIGHTS_OFFSET * sum1;
     acc2 += WEIGHTS_OFFSET * sum2;
+    acc3 += WEIGHTS_OFFSET * sum3;
 #endif /* WEIGHTS_OFFSET != 0 */
 
 #if INPUT_OFFSET != 0
     VEC_INT offs = INPUT_OFFSET * sum_we;
 
     acc0 += offs;
+    acc1 += offs;
     acc2 += offs;
+    acc3 += offs;
 #endif /* INPUT_OFFSET != 0 */
 
 #if K_OFFSET != 0
     acc0 += (VEC_INT)K_OFFSET;
+    acc1 += (VEC_INT)K_OFFSET;
     acc2 += (VEC_INT)K_OFFSET;
+    acc3 += (VEC_INT)K_OFFSET;
 #endif /* K_OFFSET != 0 */
 
     acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
+    acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
     acc2 = asymm_mult_by_quant_multiplier_less_than_one(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
+    acc3 = asymm_mult_by_quant_multiplier_less_than_one(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
+
     acc0 += (VEC_INT)OUTPUT_OFFSET;
+    acc1 += (VEC_INT)OUTPUT_OFFSET;
     acc2 += (VEC_INT)OUTPUT_OFFSET;
+    acc3 += (VEC_INT)OUTPUT_OFFSET;
+
     VEC_UCHAR res0 = CONVERT_SAT(acc0, VEC_UCHAR);
+    VEC_UCHAR res1 = CONVERT_SAT(acc1, VEC_UCHAR);
     VEC_UCHAR res2 = CONVERT_SAT(acc2, VEC_UCHAR);
-    res0           = CLAMP(res0, (VEC_UCHAR)0, (VEC_UCHAR)255);
-    res2           = CLAMP(res2, (VEC_UCHAR)0, (VEC_UCHAR)255);
+    VEC_UCHAR res3 = CONVERT_SAT(acc3, VEC_UCHAR);
+
+    res0 = CLAMP(res0, (VEC_UCHAR)0, (VEC_UCHAR)255);
+    res1 = CLAMP(res1, (VEC_UCHAR)0, (VEC_UCHAR)255);
+    res2 = CLAMP(res2, (VEC_UCHAR)0, (VEC_UCHAR)255);
+    res3 = CLAMP(res3, (VEC_UCHAR)0, (VEC_UCHAR)255);
+
+    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z;
 
     VSTORE(VEC_SIZE)
-    (res0, 0, dst.ptr + 0 * dst_stride_y);
+    (res0, 0, dst_addr + 0 * dst_stride_y);
     VSTORE(VEC_SIZE)
-    (res2, 0, dst.ptr + 1 * dst_stride_y);
+    (res1, 0, dst_addr + 1 * dst_stride_y);
+
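+    // Guard the stores to the second output plane when DST_DIM_2 is not a multiple of NUM_PLANES_PROCESSED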
+#if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
+    if((z * NUM_PLANES_PROCESSED + 1) < DST_DIM_2)
+#endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
+    {
+        VSTORE(VEC_SIZE)
+        (res2, 0, dst_addr + 0 * dst_stride_y + 1 * dst_stride_z);
+        VSTORE(VEC_SIZE)
+        (res3, 0, dst_addr + 1 * dst_stride_y + 1 * dst_stride_z);
+    }
 }
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
 
-#endif /* defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) */
+#endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
 
-#endif /* defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT) */
+#endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
+
+#endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index dcc471e..3f7a2a5 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -30,9 +30,12 @@
 
 #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
 #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
 #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED)
 
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED)
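+// Gate the accumulate pragma separately: a device may expose cl_arm_integer_dot_product_int8 without the accumulate extension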
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED)
+
 #if defined(ARM_COMPUTE_DEBUG_ENABLED)
 #if defined(cl_arm_printf)
 #pragma OPENCL EXTENSION cl_arm_printf : enable
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
index 8bd62c6..e091e5c 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
@@ -54,7 +54,6 @@
                                     "For QASYMM8 only logistic, relu, lower bounded relu and lower-upper bounded relu are supported");
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
     ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(0) != 3 || weights->dimension(1) != 3);
-    ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(2) * depth_multiplier) != output->dimension(2));
     ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3);
 
     const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
@@ -170,9 +169,11 @@
     }
     else
     {
-        kernel_name                       = is_qasymm ? "depthwise_convolution_3x3_quantized_nchw" : "depthwise_convolution_3x3";
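+        // Use the dot8 variant of the quantized NCHW kernel when the device supports the int8 dot product extension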
+        const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device());
+
+        kernel_name                       = is_qasymm ? (std::string("depthwise_convolution_3x3_quantized") + (is_dot8_supported ? "_dot8" : "") + "_nchw") : "depthwise_convolution_3x3";
         num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
-        num_elems_written_per_iteration_y = (is_qasymm && conv_stride_y < 3) ? (2 / conv_stride_y) : 1;
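+        // Two output rows per iteration are only written by the quantized kernel with unit stride in Y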
+        num_elems_written_per_iteration_y = (is_qasymm && conv_stride_y == 1) ? 2 : 1;
         num_elems_read_per_iteration_x    = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x;
         num_elems_read_per_iteration_y    = num_elems_written_per_iteration_y + 2;
     }
@@ -210,6 +211,7 @@
                                                          ActivationLayerInfo act_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
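+    // Validate the arguments before configuring the kernel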
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info));
 
     bool is_qasymm = is_data_type_quantized_asymmetric(input->info()->data_type());
 
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
index 7754e1b..610bfb5 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
@@ -82,23 +82,26 @@
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *output,
                                                         const PadStrideInfo &conv_info)
 {
+    // Get convolved dimensions
+    const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, 1 /* depth_multiplier */);
+
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output,
+                       output_shape,
+                       1,
+                       input->data_type(),
+                       input->quantization_info());
+
     const bool is_qasymm   = is_data_type_quantized_asymmetric(input->data_type());
     const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
 
-    const unsigned int num_rows_processed_per_iteration = is_qasymm ? 4 : (is_stride_1 ? 2 : 1);
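+    // Quantized and FP paths now process the same number of rows per iteration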
+    const unsigned int num_rows_processed_per_iteration = is_stride_1 ? 2 : 1;
     const unsigned int num_elems_accessed_per_iteration = is_qasymm ? 4 : 2;
     const unsigned int num_rows_read_per_iteration      = num_rows_processed_per_iteration + 2;
     const unsigned int num_rows_written_per_iteration   = std::ceil(num_rows_processed_per_iteration / static_cast<float>(conv_info.stride().first));
 
     BorderSize border_size;
-    if(is_qasymm)
-    {
-        border_size = BorderSize(std::max(conv_info.pad_left(), conv_info.pad_top()), 0, std::max(conv_info.pad_right(), conv_info.pad_bottom()), 0);
-    }
-    else
-    {
-        border_size = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0);
-    }
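+    // A single border size computation now serves both the quantized and FP paths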
+    border_size = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0);
 
     // Configure kernel window
     Window win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_written_per_iteration));
@@ -163,20 +166,12 @@
     _weights                            = weights;
     _biases                             = biases;
     _conv_stride_y                      = conv_info.stride().second;
-    _num_rows_processed_per_iteration   = is_qasymm ? 4 : (is_stride_1 ? 2 : 1);
-    _num_planes_processed_per_iteration = (is_stride_1 && !is_qasymm) ? 2 : 1;
+    _num_rows_processed_per_iteration   = is_stride_1 ? 2 : 1;
+    _num_planes_processed_per_iteration = is_stride_1 ? 2 : 1;
+    _border_size                        = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0);
 
     const unsigned int num_elems_accessed_per_iteration = is_qasymm ? 4 : 2;
 
-    if(is_qasymm)
-    {
-        _border_size = BorderSize(std::max(conv_info.pad_left(), conv_info.pad_top()), 0, std::max(conv_info.pad_right(), conv_info.pad_bottom()), 0);
-    }
-    else
-    {
-        _border_size = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0);
-    }
-
     CLBuildOptions build_opts;
     build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
     build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_accessed_per_iteration));
@@ -226,7 +221,8 @@
             }
         }
     }
-    else if(is_stride_1)
+
+    if(is_stride_1)
     {
         build_opts.add_option("-DNUM_ROWS_PROCESSED=" + support::cpp11::to_string(_num_rows_processed_per_iteration));
         build_opts.add_option("-DNUM_PLANES_PROCESSED=" + support::cpp11::to_string(_num_planes_processed_per_iteration));
@@ -239,11 +235,9 @@
     }
 
     // Create kernel
-    std::string kernel_name = std::string("depthwise_convolution_3x3") + (is_qasymm ? std::string("_quantized") : std::string()) + std::string("_nhwc");
-    if(is_qasymm || is_stride_1)
-    {
-        kernel_name += std::string("_stride") + support::cpp11::to_string(conv_stride_x);
-    }
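+    // The dot8 quantized variant is only selected for stride 1 (see FIXME COMPMID-1424)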
+    const bool  is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device());
+    const bool  use_dot8_kernel   = is_qasymm && is_dot8_supported && is_stride_1; /* FIXME (COMPMID-1424) */
+    std::string kernel_name       = std::string("depthwise_convolution_3x3") + (is_qasymm ? std::string("_quantized") + (use_dot8_kernel ? "_dot8" : "") : "") + "_nhwc" + (is_stride_1 ? "_stride1" : "");
 
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
 
@@ -299,23 +293,19 @@
     Window slice_in  = win_in.first_slice_window_3D();
     Window slice_out = win.first_slice_window_3D();
 
+    unsigned int idx = 3 * num_arguments_per_3D_tensor();
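+    // Start appending extra kernel arguments after the three 3D tensors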
+
     if(_biases != nullptr)
     {
-        unsigned int idx = 3 * num_arguments_per_3D_tensor();
-        Window       win_biases;
+        Window win_biases;
         win_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
         win_biases.set_dimension_step(Window::DimX, window.x().step());
         add_1D_tensor_argument(idx, _biases, win_biases);
     }
 
-    if(!(is_data_type_quantized_asymmetric(_input->info()->data_type())))
-    {
-        unsigned int idx        = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0);
-        const int    max_offset = _input->info()->strides_in_bytes().z() * _input->info()->dimension(2) - (_input->info()->padding().bottom + _input->info()->padding().top) *
-                                  _input->info()->strides_in_bytes().y();
-
-        _kernel.setArg(idx, max_offset);
-    }
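+    // The maximum valid input offset is now passed unconditionally, for both quantized and FP kernels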
+    const int max_offset = _input->info()->strides_in_bytes().z() * _input->info()->dimension(2) - (_input->info()->padding().bottom + _input->info()->padding().top) *
+                           _input->info()->strides_in_bytes().y();
+    _kernel.setArg(idx, max_offset);
 
     do
     {