COMPMID-882 - Optimize GEMMLowp on OpenCL by reshaping matrices

This new optimization achieves 36.3% MAC utilisation on a Mate 9 @ 1 GHz.
The performance figures are reported here:
https://confluence.arm.com/display/MLENG/GEMMLowp+performance%3A+ACL+18.02

Change-Id: I71b6a217068763dfdc11bbf3574ee0eb94f93679
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118531
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
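
For reference, a minimal sketch of how the new reshape-related build options
fit together on the host side (the factor values are illustrative, not tuned
defaults; only the option names come from this patch):

    // Illustrative factors; the optimal values depend on the target GPU.
    const int mult_transpose1xW_width   = 2;
    const int mult_interleave4x4_height = 2;

    // The kernel computes a 4x4 output tile, while matrix B is transposed in
    // 1x(16 * mult_transpose1xW_width) chunks of uchar, so the step between
    // consecutive 4-element groups of B is 4 * mult_transpose1xW_width.
    build_opts.add_option("-DTRANSPOSE1XW_WIDTH_STEP=" + support::cpp11::to_string(4 * mult_transpose1xW_width));
    build_opts.add_option("-DMULT_INTERLEAVE4X4_HEIGHT=" + support::cpp11::to_string(mult_interleave4x4_height));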
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 0847612..5452b8a 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -237,7 +237,8 @@
     { "gemmlowp_matrix_b_reduction", "gemmlowp.cl" },
     { "gemmlowp_mm_bifrost", "gemmlowp.cl" },
     { "gemmlowp_mm_midgard", "gemmlowp.cl" },
-    { "gemmlowp_mm_interleaved_transposed", "gemmlowp.cl" },
+    { "gemmlowp_mm_interleaved_transposed_bifrost", "gemmlowp.cl" },
+    { "gemmlowp_mm_interleaved_transposed_midgard", "gemmlowp.cl" },
     { "gemmlowp_offset_contribution", "gemmlowp.cl" },
     { "gemmlowp_output_stage_quantize_down", "gemmlowp.cl" },
     { "gemmlowp_output_stage_quantize_down_fixedpoint", "gemmlowp.cl" },
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index bad09f3..58a550f 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -29,19 +29,20 @@
 
 #if defined(TRANSPOSE_W) && defined(MULT_TRANSPOSE1XW_WIDTH)
 
-#if TRANSPOSE_W == 4
-#define DATA_TYPE uint
-#elif TRANSPOSE_W == 8
-#define DATA_TYPE ushort
-#elif TRANSPOSE_W == 16
+#if ELEMENT_SIZE == 1
 #define DATA_TYPE uchar
-#else // TRANSPOSE_W == 16
-#error "Transpose width not supported"
-#endif // TRANSPOSE_W
+#elif ELEMENT_SIZE == 2
+#define DATA_TYPE ushort
+#elif ELEMENT_SIZE == 4
+#define DATA_TYPE uint
+#else // ELEMENT_SIZE == 1
+#error "Element size not supported"
+#endif // ELEMENT_SIZE
 
 /** This OpenCL kernel computes the "vector" 1xW transposition of input matrix
  *
- * @attention The multiplication factor (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2)
+ * @note The transposition width must be passed at compile time using -DTRANSPOSE_W (e.g. -DTRANSPOSE_W=4)
+ * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (e.g. -DMULT_TRANSPOSE1XW_WIDTH=2)
  *
  * @param[in]  src_ptr                           Pointer to the source matrix. Supported data types: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
  * @param[in]  src_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -81,6 +82,9 @@
 
 /** This OpenCL kernel reshapes the input matrix transposing each 4x4 block and interleaving the values
  *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
+ *
  * @param[in]  src_ptr                           Pointer to the source matrix. Supported data types: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
  * @param[in]  src_stride_x                      Stride of the source matrix in X dimension (in bytes)
  * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
@@ -137,7 +141,9 @@
 /** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
  *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
  *
- * @attention The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (e.g. -DMULT_TRANSPOSE1XW_WIDTH=2)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
  *
  * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
  * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -240,7 +246,9 @@
 /** This OpenCL kernel is optimized for Bifrost. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
  *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
  *
- * @attention The number of matrix B columns and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (e.g. -DMULT_TRANSPOSE1XW_WIDTH=2)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
  *
  * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
  * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -461,7 +469,9 @@
 /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
  *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
  *
- * @attention The number of matrix B columns and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (e.g. -DMULT_TRANSPOSE1XW_WIDTH=2)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
  *
  * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F16
  * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -566,7 +576,9 @@
 /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 8 bit fixed point precision
  *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
  *
- * @attention The number of matrix B columns, the optional alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
+ * @note The number of columns of matrix B, the optional alpha's value and the fixed point position need to be passed at compile time using -DCOLS_B, -DALPHA and -DFIXED_POINT_POSITION
+ * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (e.g. -DMULT_TRANSPOSE1XW_WIDTH=2)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
  *
  * @note: ALPHA must be passed in 8 bit fixed point format
  *
@@ -665,7 +677,9 @@
 /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 16 bit fixed point precision
  *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
  *
- * @attention The number of matrix B columns, the optional alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
+ * @note The number of columns of matrix B, the optional alpha's value and the fixed point position need to be passed at compile time using -DCOLS_B, -DALPHA and -DFIXED_POINT_POSITION
+ * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (e.g. -DMULT_TRANSPOSE1XW_WIDTH=2)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
  *
  * @note: ALPHA must be passed in 16 bit fixed point format
  *
@@ -1643,7 +1657,7 @@
 #if defined(BETA)
 /** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
  *
- * @attention The beta's value need to be passed at compile time using -DBETA
+ * @note The beta's value needs to be passed at compile time using -DBETA
  *
  * @param[in]  src_ptr                           Pointer to the source matrix. Supported data types: F32
  * @param[in]  src_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -1680,7 +1694,7 @@
 
 /** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
  *
- * @attention The beta's value need to be passed at compile time using -DBETA
+ * @note The beta's value needs to be passed at compile time using -DBETA
  *
  * @param[in]  src_ptr                           Pointer to the source matrix. Supported data types: F16
  * @param[in]  src_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -1718,7 +1732,7 @@
 #if defined(FIXED_POINT_POSITION)
 /** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 8 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta:
  *
- * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
+ * @note The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
  *
  * @note: BETA must be passed in 8 bit fixed point format
  *
@@ -1757,7 +1771,7 @@
 
 /** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 16 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta:
  *
- * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
+ * @note The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
  *
  * @note: BETA must be passed in 16 bit fixed point format
  *
@@ -1799,9 +1813,9 @@
 #if defined(WIDTH_VECTOR_A)
 /** This OpenCL kernel computes the vector by matrix multiplication between each row of A (src0) and matrix B (src1) used for locally connected layer
  *
- * @attention The width of A need to be passed at compile time using -DWIDTH_VECTOR_A
+ * @note The width of A needs to be passed at compile time using -DWIDTH_VECTOR_A
  *
- * @attention The input A and matrix B must not be reshaped
+ * @note The input A and matrix B must not be reshaped
  *
  * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
  * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index d724600..5e144d7 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -24,11 +24,13 @@
 #include "helpers.h"
 #include "helpers_asymm.h"
 
-#if defined(COLS_B)
+#if defined(COLS_B) && defined(MULT_INTERLEAVE4X4_HEIGHT) && defined(TRANSPOSE1XW_WIDTH_STEP)
 /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
+ *  Matrix A and matrix B must be reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel before running the matrix multiplication
  *
- * @attention The number of matrix B columns needs to be passed at compile time using -DCOLS_B
+ * @note The number of matrix B columns needs to be passed at compile time using -DCOLS_B (e.g. -DCOLS_B=1024)
+ * @note The transposition width step (mult_transpose1xW_width * 4) must be passed at compile time using -DTRANSPOSE1XW_WIDTH_STEP (e.g. -DTRANSPOSE1XW_WIDTH_STEP=8)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
  *
  * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data type: QASYMM8
  * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
@@ -49,69 +51,370 @@
  * @param[in]  dst_step_y                         dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
  */
-__kernel void gemmlowp_mm_interleaved_transposed(IMAGE_DECLARATION(src0),
-                                                 IMAGE_DECLARATION(src1),
-                                                 IMAGE_DECLARATION(dst))
+__kernel void gemmlowp_mm_interleaved_transposed_midgard(IMAGE_DECLARATION(src0),
+                                                         IMAGE_DECLARATION(src1),
+                                                         IMAGE_DECLARATION(dst))
 {
-    // src_addr.s0 = address of matrix A
-    // src_addr.s1 = address of matrix B
-    // Compute address for matrix A and B
-    int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
-                                                                        (src1_stride_y));
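+    // Each row of the reshaped matrices packs several 4x4 blocks side by side:
+    // dividing the global ids by the reshape factors selects the reshaped rows
+    // of A and B; the remainders (offset_row_a/b below) select the 4-element
+    // block within each row.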
+    int x = get_global_id(0) / TRANSPOSE1XW_WIDTH_STEP;
+    int y = get_global_id(1) / MULT_INTERLEAVE4X4_HEIGHT;
 
-    // Add offset_first_element_in_bytes
-    src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+    // Offset
+    const int offset_row_a = (get_global_id(1) % MULT_INTERLEAVE4X4_HEIGHT) * 4;
+    const int offset_row_b = (get_global_id(0) % TRANSPOSE1XW_WIDTH_STEP) * 4;
+
+    // src_addr_a = address of matrix A
+    // src_addr_b = address of matrix B
+    __global uchar *src_addr_a = (__global uchar *)(src0_ptr + y * src0_stride_y + src0_offset_first_element_in_bytes);
+    __global uchar *src_addr_b = (__global uchar *)(src1_ptr + x * src1_stride_y + src1_offset_first_element_in_bytes);
 
     // Compute end row address for matrix B
-    int end_row_mtx_b = src_addr.s1 + COLS_B;
+    __global uchar *src_end_addr_b = src_addr_b + COLS_B;
+
+    src_addr_a += offset_row_a;
+    src_addr_b += offset_row_b;
 
     // Reset accumulators
-    int16 c00 = 0;
-    int16 c10 = 0;
-    int16 c20 = 0;
-    int16 c30 = 0;
+    int4 c00 = 0;
+    int4 c10 = 0;
+    int4 c20 = 0;
+    int4 c30 = 0;
 
-    for(; src_addr.s1 <= (end_row_mtx_b - 32); src_addr += (int2)(8, 32))
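+    // K loop unrolled by 2: each iteration consumes 8 values of A and 8 of B,
+    // stepping through the reshaped matrices with the corresponding strides.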
+    for(; src_addr_b <= (src_end_addr_b - (int)(8 * TRANSPOSE1XW_WIDTH_STEP)); src_addr_a += 8 * MULT_INTERLEAVE4X4_HEIGHT, src_addr_b += 8 * TRANSPOSE1XW_WIDTH_STEP)
     {
         // Load values from matrix A (interleaved) and matrix B (transposed)
-        int8 a0  = convert_int8(vload8(0, ((__global uchar *)src0_ptr) + src_addr.s0));
-        int16 b0 = convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1));
+        int4 a0 = convert_int4(vload4(0, src_addr_a));
+        int4 b0 = convert_int4(vload4(0, src_addr_b));
 
-        c00 += (int16)a0.s0 * b0;
-        c10 += (int16)a0.s1 * b0;
-        c20 += (int16)a0.s2 * b0;
-        c30 += (int16)a0.s3 * b0;
+        c00 += (int4)a0.s0 * b0;
+        c10 += (int4)a0.s1 * b0;
+        c20 += (int4)a0.s2 * b0;
+        c30 += (int4)a0.s3 * b0;
 
-        int16 b1 = convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1 + 16));
+        a0 = convert_int4(vload4(0, src_addr_a + 4 * MULT_INTERLEAVE4X4_HEIGHT));
+        b0 = convert_int4(vload4(0, src_addr_b + 4 * TRANSPOSE1XW_WIDTH_STEP));
 
-        c00 += (int16)a0.s4 * b1;
-        c10 += (int16)a0.s5 * b1;
-        c20 += (int16)a0.s6 * b1;
-        c30 += (int16)a0.s7 * b1;
+        c00 += (int4)a0.s0 * b0;
+        c10 += (int4)a0.s1 * b0;
+        c20 += (int4)a0.s2 * b0;
+        c30 += (int4)a0.s3 * b0;
     }
 
-    for(; src_addr.s1 < end_row_mtx_b; src_addr += (int2)(4, 16))
+    for(; src_addr_b < src_end_addr_b; src_addr_a += (4 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (4 * TRANSPOSE1XW_WIDTH_STEP))
     {
         // Load values from matrix A (interleaved) and matrix B (transposed)
-        int4 a0  = convert_int4(vload4(0, ((__global uchar *)src0_ptr) + src_addr.s0));
-        int16 b0 = convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1));
+        int4 a0 = convert_int4(vload4(0, src_addr_a));
+        int4 b0 = convert_int4(vload4(0, src_addr_b));
 
-        c00 += (int16)a0.s0 * b0;
-        c10 += (int16)a0.s1 * b0;
-        c20 += (int16)a0.s2 * b0;
-        c30 += (int16)a0.s3 * b0;
+        c00 += (int4)a0.s0 * b0;
+        c10 += (int4)a0.s1 * b0;
+        c20 += (int4)a0.s2 * b0;
+        c30 += (int4)a0.s3 * b0;
     }
 
     // Compute destination address
     Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
 
-    // Store 4x16 block
-    vstore16(c00, 0, (__global int *)(offset(&dst, 0, 0)));
-    vstore16(c10, 0, (__global int *)(offset(&dst, 0, 1)));
-    vstore16(c20, 0, (__global int *)(offset(&dst, 0, 2)));
-    vstore16(c30, 0, (__global int *)(offset(&dst, 0, 3)));
+    // Store 4x4 block
+    vstore4(c00, 0, (__global int *)(offset(&dst, 0, 0)));
+    vstore4(c10, 0, (__global int *)(offset(&dst, 0, 1)));
+    vstore4(c20, 0, (__global int *)(offset(&dst, 0, 2)));
+    vstore4(c30, 0, (__global int *)(offset(&dst, 0, 3)));
 }
-#endif // defined(COLS_B)
+
+/** This OpenCL kernel is optimized for Bifrost and computes the matrix multiplication between matrix A (src0) and matrix B (src1)
+ *  Matrix A and matrix B must be reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel before running the matrix multiplication
+ *
+ * @note The number of matrix B columns needs to be passed at compile time using -DCOLS_B (e.g. -DCOLS_B=1024)
+ * @note The transposition width step (mult_transpose1xW_width * 4) must be passed at compile time using -DTRANSPOSE1XW_WIDTH_STEP (e.g. -DTRANSPOSE1XW_WIDTH_STEP=8)
+ * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (e.g. -DMULT_INTERLEAVE4X4_HEIGHT=2)
+ *
+ * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data type: QASYMM8
+ * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data type: same as @p src0_ptr
+ * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr                            Pointer to the destination matrix. Supported data type: S32
+ * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
+ * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
+ */
+__kernel void gemmlowp_mm_interleaved_transposed_bifrost(IMAGE_DECLARATION(src0),
+                                                         IMAGE_DECLARATION(src1),
+                                                         IMAGE_DECLARATION(dst))
+{
+    int x = get_global_id(0) / TRANSPOSE1XW_WIDTH_STEP;
+    int y = get_global_id(1) / MULT_INTERLEAVE4X4_HEIGHT;
+
+    // Offset
+    const int offset_row_a = (get_global_id(1) % MULT_INTERLEAVE4X4_HEIGHT) * 4;
+    const int offset_row_b = (get_global_id(0) % TRANSPOSE1XW_WIDTH_STEP) * 4;
+
+    // src_addr_a = address of matrix A
+    // src_addr_b = address of matrix B
+    __global uchar *src_addr_a = (__global uchar *)(src0_ptr + y * src0_stride_y + src0_offset_first_element_in_bytes);
+    __global uchar *src_addr_b = (__global uchar *)(src1_ptr + x * src1_stride_y + src1_offset_first_element_in_bytes);
+
+    // Compute end row address for matrix B
+    __global uchar *src_end_addr_b = src_addr_b + COLS_B;
+
+    src_addr_a += offset_row_a;
+    src_addr_b += offset_row_b;
+
+    // Reset accumulators
+    uint c00 = 0;
+    uint c01 = 0;
+    uint c02 = 0;
+    uint c03 = 0;
+    uint c10 = 0;
+    uint c11 = 0;
+    uint c12 = 0;
+    uint c13 = 0;
+    uint c20 = 0;
+    uint c21 = 0;
+    uint c22 = 0;
+    uint c23 = 0;
+    uint c30 = 0;
+    uint c31 = 0;
+    uint c32 = 0;
+    uint c33 = 0;
+
+#if MULT_INTERLEAVE4X4_HEIGHT == 1
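+    // Fast path, only valid when A is not multi-interleaved: process 32 values
+    // along K per iteration, widening the 8-bit operands to 16 bits before
+    // multiplying and accumulating into 32-bit registers.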
+    for(; src_addr_b <= (src_end_addr_b - (int)(32 * TRANSPOSE1XW_WIDTH_STEP)); src_addr_a += (32 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (32 * TRANSPOSE1XW_WIDTH_STEP))
+    {
+        // Load values from matrix A (interleaved) and matrix B (transposed)
+        uchar16 a0 = vload16(0, src_addr_a);
+        uchar4  b0 = vload4(0, src_addr_b);
+
+        c00 += (ushort)a0.s0 * b0.s0;
+        c01 += (ushort)a0.s0 * b0.s1;
+        c02 += (ushort)a0.s0 * b0.s2;
+        c03 += (ushort)a0.s0 * b0.s3;
+
+        c10 += (ushort)a0.s1 * b0.s0;
+        c11 += (ushort)a0.s1 * b0.s1;
+        c12 += (ushort)a0.s1 * b0.s2;
+        c13 += (ushort)a0.s1 * b0.s3;
+
+        c20 += (ushort)a0.s2 * b0.s0;
+        c21 += (ushort)a0.s2 * b0.s1;
+        c22 += (ushort)a0.s2 * b0.s2;
+        c23 += (ushort)a0.s2 * b0.s3;
+
+        c30 += (ushort)a0.s3 * b0.s0;
+        c31 += (ushort)a0.s3 * b0.s1;
+        c32 += (ushort)a0.s3 * b0.s2;
+        c33 += (ushort)a0.s3 * b0.s3;
+
+        // Load values from matrix B (transposed)
+        b0 = vload4(0, src_addr_b + 4 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.s4 * b0.s0;
+        c01 += (ushort)a0.s4 * b0.s1;
+        c02 += (ushort)a0.s4 * b0.s2;
+        c03 += (ushort)a0.s4 * b0.s3;
+
+        c10 += (ushort)a0.s5 * b0.s0;
+        c11 += (ushort)a0.s5 * b0.s1;
+        c12 += (ushort)a0.s5 * b0.s2;
+        c13 += (ushort)a0.s5 * b0.s3;
+
+        c20 += (ushort)a0.s6 * b0.s0;
+        c21 += (ushort)a0.s6 * b0.s1;
+        c22 += (ushort)a0.s6 * b0.s2;
+        c23 += (ushort)a0.s6 * b0.s3;
+
+        c30 += (ushort)a0.s7 * b0.s0;
+        c31 += (ushort)a0.s7 * b0.s1;
+        c32 += (ushort)a0.s7 * b0.s2;
+        c33 += (ushort)a0.s7 * b0.s3;
+
+        // Load values from matrix B (transposed)
+        b0 = vload4(0, src_addr_b + 8 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.s8 * b0.s0;
+        c01 += (ushort)a0.s8 * b0.s1;
+        c02 += (ushort)a0.s8 * b0.s2;
+        c03 += (ushort)a0.s8 * b0.s3;
+
+        c10 += (ushort)a0.s9 * b0.s0;
+        c11 += (ushort)a0.s9 * b0.s1;
+        c12 += (ushort)a0.s9 * b0.s2;
+        c13 += (ushort)a0.s9 * b0.s3;
+
+        c20 += (ushort)a0.sA * b0.s0;
+        c21 += (ushort)a0.sA * b0.s1;
+        c22 += (ushort)a0.sA * b0.s2;
+        c23 += (ushort)a0.sA * b0.s3;
+
+        c30 += (ushort)a0.sB * b0.s0;
+        c31 += (ushort)a0.sB * b0.s1;
+        c32 += (ushort)a0.sB * b0.s2;
+        c33 += (ushort)a0.sB * b0.s3;
+
+        // Load values from matrix B (transposed)
+        b0 = vload4(0, src_addr_b + 12 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.sC * b0.s0;
+        c01 += (ushort)a0.sC * b0.s1;
+        c02 += (ushort)a0.sC * b0.s2;
+        c03 += (ushort)a0.sC * b0.s3;
+
+        c10 += (ushort)a0.sD * b0.s0;
+        c11 += (ushort)a0.sD * b0.s1;
+        c12 += (ushort)a0.sD * b0.s2;
+        c13 += (ushort)a0.sD * b0.s3;
+
+        c20 += (ushort)a0.sE * b0.s0;
+        c21 += (ushort)a0.sE * b0.s1;
+        c22 += (ushort)a0.sE * b0.s2;
+        c23 += (ushort)a0.sE * b0.s3;
+
+        c30 += (ushort)a0.sF * b0.s0;
+        c31 += (ushort)a0.sF * b0.s1;
+        c32 += (ushort)a0.sF * b0.s2;
+        c33 += (ushort)a0.sF * b0.s3;
+
+        // Load values from matrix A (interleaved) and matrix B (transposed)
+        a0 = vload16(0, src_addr_a + 16);
+        b0 = vload4(0, src_addr_b + 16 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.s0 * b0.s0;
+        c01 += (ushort)a0.s0 * b0.s1;
+        c02 += (ushort)a0.s0 * b0.s2;
+        c03 += (ushort)a0.s0 * b0.s3;
+
+        c10 += (ushort)a0.s1 * b0.s0;
+        c11 += (ushort)a0.s1 * b0.s1;
+        c12 += (ushort)a0.s1 * b0.s2;
+        c13 += (ushort)a0.s1 * b0.s3;
+
+        c20 += (ushort)a0.s2 * b0.s0;
+        c21 += (ushort)a0.s2 * b0.s1;
+        c22 += (ushort)a0.s2 * b0.s2;
+        c23 += (ushort)a0.s2 * b0.s3;
+
+        c30 += (ushort)a0.s3 * b0.s0;
+        c31 += (ushort)a0.s3 * b0.s1;
+        c32 += (ushort)a0.s3 * b0.s2;
+        c33 += (ushort)a0.s3 * b0.s3;
+
+        // Load values from matrix B (transposed)
+        b0 = vload4(0, src_addr_b + 20 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.s4 * b0.s0;
+        c01 += (ushort)a0.s4 * b0.s1;
+        c02 += (ushort)a0.s4 * b0.s2;
+        c03 += (ushort)a0.s4 * b0.s3;
+
+        c10 += (ushort)a0.s5 * b0.s0;
+        c11 += (ushort)a0.s5 * b0.s1;
+        c12 += (ushort)a0.s5 * b0.s2;
+        c13 += (ushort)a0.s5 * b0.s3;
+
+        c20 += (ushort)a0.s6 * b0.s0;
+        c21 += (ushort)a0.s6 * b0.s1;
+        c22 += (ushort)a0.s6 * b0.s2;
+        c23 += (ushort)a0.s6 * b0.s3;
+
+        c30 += (ushort)a0.s7 * b0.s0;
+        c31 += (ushort)a0.s7 * b0.s1;
+        c32 += (ushort)a0.s7 * b0.s2;
+        c33 += (ushort)a0.s7 * b0.s3;
+
+        // Load values from matrix B (transposed)
+        b0 = vload4(0, src_addr_b + 24 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.s8 * b0.s0;
+        c01 += (ushort)a0.s8 * b0.s1;
+        c02 += (ushort)a0.s8 * b0.s2;
+        c03 += (ushort)a0.s8 * b0.s3;
+
+        c10 += (ushort)a0.s9 * b0.s0;
+        c11 += (ushort)a0.s9 * b0.s1;
+        c12 += (ushort)a0.s9 * b0.s2;
+        c13 += (ushort)a0.s9 * b0.s3;
+
+        c20 += (ushort)a0.sA * b0.s0;
+        c21 += (ushort)a0.sA * b0.s1;
+        c22 += (ushort)a0.sA * b0.s2;
+        c23 += (ushort)a0.sA * b0.s3;
+
+        c30 += (ushort)a0.sB * b0.s0;
+        c31 += (ushort)a0.sB * b0.s1;
+        c32 += (ushort)a0.sB * b0.s2;
+        c33 += (ushort)a0.sB * b0.s3;
+
+        // Load values from matrix B (transposed)
+        b0 = vload4(0, src_addr_b + 28 * TRANSPOSE1XW_WIDTH_STEP);
+
+        c00 += (ushort)a0.sC * b0.s0;
+        c01 += (ushort)a0.sC * b0.s1;
+        c02 += (ushort)a0.sC * b0.s2;
+        c03 += (ushort)a0.sC * b0.s3;
+
+        c10 += (ushort)a0.sD * b0.s0;
+        c11 += (ushort)a0.sD * b0.s1;
+        c12 += (ushort)a0.sD * b0.s2;
+        c13 += (ushort)a0.sD * b0.s3;
+
+        c20 += (ushort)a0.sE * b0.s0;
+        c21 += (ushort)a0.sE * b0.s1;
+        c22 += (ushort)a0.sE * b0.s2;
+        c23 += (ushort)a0.sE * b0.s3;
+
+        c30 += (ushort)a0.sF * b0.s0;
+        c31 += (ushort)a0.sF * b0.s1;
+        c32 += (ushort)a0.sF * b0.s2;
+        c33 += (ushort)a0.sF * b0.s3;
+    }
+#endif // MULT_INTERLEAVE4X4_HEIGHT == 1
+
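+    // Left-over loop: process any remaining values along K, 4 at a time.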
+    for(; src_addr_b < src_end_addr_b; src_addr_a += (4 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (4 * TRANSPOSE1XW_WIDTH_STEP))
+    {
+        // Load values from matrix A (interleaved) and matrix B (transposed)
+        uchar4 a0 = vload4(0, src_addr_a);
+        uchar4 b0 = vload4(0, src_addr_b);
+
+        c00 += (ushort)a0.s0 * b0.s0;
+        c01 += (ushort)a0.s0 * b0.s1;
+        c02 += (ushort)a0.s0 * b0.s2;
+        c03 += (ushort)a0.s0 * b0.s3;
+
+        c10 += (ushort)a0.s1 * b0.s0;
+        c11 += (ushort)a0.s1 * b0.s1;
+        c12 += (ushort)a0.s1 * b0.s2;
+        c13 += (ushort)a0.s1 * b0.s3;
+
+        c20 += (ushort)a0.s2 * b0.s0;
+        c21 += (ushort)a0.s2 * b0.s1;
+        c22 += (ushort)a0.s2 * b0.s2;
+        c23 += (ushort)a0.s2 * b0.s3;
+
+        c30 += (ushort)a0.s3 * b0.s0;
+        c31 += (ushort)a0.s3 * b0.s1;
+        c32 += (ushort)a0.s3 * b0.s2;
+        c33 += (ushort)a0.s3 * b0.s3;
+    }
+
+    // Compute destination address
+    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+    // Store 4x4 block
+    vstore4((int4)(c00, c01, c02, c03), 0, (__global int *)(offset(&dst, 0, 0)));
+    vstore4((int4)(c10, c11, c12, c13), 0, (__global int *)(offset(&dst, 0, 1)));
+    vstore4((int4)(c20, c21, c22, c23), 0, (__global int *)(offset(&dst, 0, 2)));
+    vstore4((int4)(c30, c31, c32, c33), 0, (__global int *)(offset(&dst, 0, 3)));
+}
+#endif // defined(COLS_B) && defined(MULT_INTERLEAVE4X4_HEIGHT) && defined(TRANSPOSE1XW_WIDTH_STEP)
 
 #if defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_Y) && defined(COLS_A)
 #define VECTOR_UCHAR VEC_DATA_TYPE(uchar, NUM_ELEMS_PROCESSED_PER_THREAD_X)
@@ -788,39 +1091,39 @@
 {
     Tensor3D mm_result = CONVERT_TO_TENSOR3D_STRUCT(mm_result);
 
-    int16 a_offset_s32 = (int16)0;
-    int16 b_offset_s32 = (int16)0;
+    int4 a_offset_s32 = (int4)0;
+    int4 b_offset_s32 = (int4)0;
 
 #if defined(A_OFFSET)
     Image sum_col = CONVERT_TO_IMAGE_STRUCT(sum_col);
 
     // Compute the offset contribution due to A_OFFSET
 #if defined(SUM_COL_HAS_BATCHES)
-    a_offset_s32 = vload16(0, (__global int *)(sum_col.ptr + get_global_id(2) * sum_col_stride_y));
+    a_offset_s32 = vload4(0, (__global int *)(sum_col.ptr + get_global_id(2) * sum_col_stride_y));
 #else  // defined(MATRIX_B_HAS_BATCHES)
-    a_offset_s32 = vload16(0, (__global int *)(sum_col.ptr));
+    a_offset_s32 = vload4(0, (__global int *)(sum_col.ptr));
 #endif // defined(MATRIX_B_HAS_BATCHES)
 
-    a_offset_s32 *= (int16)A_OFFSET;
+    a_offset_s32 *= (int4)A_OFFSET;
 #endif // defined(A_OFFSET)
 
 #if defined(B_OFFSET)
     Image sum_row = CONVERT_TO_IMAGE_STRUCT(sum_row);
 
     // Compute the offset contribution due to B_OFFSET
-    b_offset_s32 = (int16) * (((__global int *)(sum_row.ptr + get_global_id(2) * sum_row_stride_y)) + get_global_id(1));
-    b_offset_s32 *= (int16)B_OFFSET;
+    b_offset_s32 = (int4) * (((__global int *)(sum_row.ptr + get_global_id(2) * sum_row_stride_y)) + get_global_id(1));
+    b_offset_s32 *= (int4)B_OFFSET;
 #endif // defined(B_OFFSET)
 
-    const int16 offset_term_s32 = (int16)K_OFFSET + a_offset_s32 + b_offset_s32;
+    const int4 offset_term_s32 = (int4)K_OFFSET + a_offset_s32 + b_offset_s32;
 
-    int16 in_s32 = vload16(0, (__global int *)mm_result.ptr);
+    int4 in_s32 = vload4(0, (__global int *)mm_result.ptr);
 
     // Add the offset terms to GEMM's result
     in_s32 += offset_term_s32;
 
     // Store the result with the offset contribution
-    vstore16(in_s32, 0, (__global int *)mm_result.ptr);
+    vstore4(in_s32, 0, (__global int *)mm_result.ptr);
 }
 #endif // defined(K_OFFSET)
 
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
index 2f96724..ae498ec 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
@@ -24,6 +24,7 @@
 #include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h"
 
 #include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/AccessWindowTranspose.h"
 #include "arm_compute/core/CL/CLHelpers.h"
 #include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/core/CL/ICLTensor.h"
@@ -34,6 +35,7 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "support/ToolchainSupport.h"
 
 #include <cstddef>
@@ -41,6 +43,7 @@
 #include <tuple>
 
 using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
 
 namespace arm_compute
 {
@@ -51,14 +54,53 @@
 {
 using ElementsProcessed = Steps;
 
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed)
+Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QASYMM8);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
+
     if(!is_interleaved_transposed)
     {
         ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != input1->dimension(1));
+
+        if(output->total_size() != 0)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(0) != output->dimension(0));
+            ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != output->dimension(1));
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+        }
+    }
+    else
+    {
+        const int m                         = reshape_info.m();
+        const int n                         = reshape_info.n();
+        const int k                         = reshape_info.k();
+        const int mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
+        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+
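+        // Reconstruct the shapes matrices A and B had before reshaping, derive
+        // the expected reshaped shapes from the reshape factors, and validate
+        // the actual inputs against them.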
+        TensorShape tensor_shape0{ input0->tensor_shape() };
+        tensor_shape0.set(0, k);
+        tensor_shape0.set(1, m);
+
+        TensorShape tensor_shape1{ input1->tensor_shape() };
+        tensor_shape1.set(0, n);
+        tensor_shape1.set(1, k);
+
+        const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0);
+        const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
+
+        const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_interleaved_shape(tensor_info0, mult_interleave4x4_height));
+        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(tensor_info1, mult_transpose1xW_width));
+
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
+
+        if(output->total_size() != 0)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != static_cast<size_t>(n));
+            ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != static_cast<size_t>(m));
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+        }
     }
 
     return Status{};
@@ -76,16 +118,14 @@
     // Check if the output tensor is a vector. If so, the kernel runs the vector-matrix multiplication
     if(is_interleaved_transposed)
     {
-        // Configure window
-        num_elems_processed_per_iteration_x                        = 16;
-        num_elems_processed_per_iteration_y                        = 4;
-        constexpr unsigned int num_elems_read_per_iteration_input0 = 4;
-        constexpr unsigned int num_elems_read_per_iteration_input1 = 16;
+        // Configure kernel window
+        num_elems_processed_per_iteration_x = 4;
+        num_elems_processed_per_iteration_y = 4;
 
         win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
 
-        AccessWindowRectangle input0_access(input0, 0, 0, num_elems_read_per_iteration_input0, 1);
-        AccessWindowRectangle input1_access(input1, 0, 0, num_elems_read_per_iteration_input1, 1);
+        AccessWindowRectangle input0_access(input0, 0, 0, num_elems_processed_per_iteration_y, 1, 1.f, 0.25f);
+        AccessWindowTranspose input1_access(input1, 0, 0, num_elems_processed_per_iteration_x, 1, 0.f, 0.25f);
         AccessWindowRectangle output_access(output, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
 
         window_changed = update_window_and_padding(win, input0_access, input1_access, output_access);
@@ -122,10 +162,18 @@
 {
 }
 
-void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed)
+void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), is_interleaved_transposed));
+
+    // Output tensor auto initialization if not yet initialized
+    TensorShape tensor_shape{ input0->info()->tensor_shape() };
+    tensor_shape.set(0, is_interleaved_transposed ? reshape_info.n() : input1->info()->dimension(0));
+    tensor_shape.set(1, is_interleaved_transposed ? reshape_info.m() : input0->info()->dimension(1));
+
+    auto_init_if_empty(*output->info(), tensor_shape, 1, DataType::S32, 1, QuantizationInfo());
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), is_interleaved_transposed, reshape_info));
 
     _input0 = input0;
     _input1 = input1;
@@ -146,8 +194,18 @@
     std::string    kernel_name(" ");
     if(is_interleaved_transposed)
     {
+        const int mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
+        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+
+        // Note: The computation tile has an x dimension of 4, which is less than the transpose width (16)
+        //        In order to access the elements of the transposed matrix B correctly, we need to pass
+        //        the correct step, which is calculated as (16 * mult_transpose1xW_width) / 4
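+        //        e.g. mult_transpose1xW_width = 2 -> TRANSPOSE1XW_WIDTH_STEP = 8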
+
         build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0)));
-        kernel_name = "gemmlowp_mm_interleaved_transposed";
+        build_opts.add_option("-DTRANSPOSE1XW_WIDTH_STEP=" + support::cpp11::to_string(4 * mult_transpose1xW_width));
+        build_opts.add_option("-DMULT_INTERLEAVE4X4_HEIGHT=" + support::cpp11::to_string(mult_interleave4x4_height));
+
+        kernel_name = "gemmlowp_mm_interleaved_transposed_" + string_from_target(arch_target);
     }
     else
     {
@@ -171,10 +229,10 @@
     _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
 }
 
-Status CLGEMMLowpMatrixMultiplyKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed)
+Status CLGEMMLowpMatrixMultiplyKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
 {
     ElementsProcessed num_elements_processed{};
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, is_interleaved_transposed));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, is_interleaved_transposed, reshape_info));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
                                                               input1->clone().get(),
                                                               output->clone().get(),
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
index d05939f..221a156 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -91,7 +91,7 @@
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row,
                                                         int32_t a_offset, int32_t b_offset)
 {
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
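+    // Reduced from 16 to 4 elements per work-item to match the int4
+    // loads/stores now used by the gemmlowp_offset_contribution kernel.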
+    constexpr unsigned int num_elems_processed_per_iteration = 4;
     bool                   window_changed                    = false;
 
     // Configure kernel window
@@ -160,6 +160,14 @@
                                                     a_offset, b_offset); // NOLINT
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure(win_config.second);
+
+    // Set config_id for enabling LWS tuning
+    _config_id = "gemmlowp_offset_contribution_";
+    _config_id += support::cpp11::to_string(mm_result->info()->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
 }
 
 Status CLGEMMLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row,
diff --git a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
index 63aed6d..24d2187 100644
--- a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
@@ -113,6 +113,7 @@
 
     // Create build options
     CLBuildOptions build_opts;
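+    // ELEMENT_SIZE now selects the kernel's DATA_TYPE (uchar/ushort/uint);
+    // see the #if chain at the top of gemm.cl.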
+    build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(input->info()->element_size()));
     build_opts.add_option("-DTRANSPOSE_W=" + support::cpp11::to_string(num_elems_processed_per_iteration));
     build_opts.add_option("-DMULT_TRANSPOSE1XW_WIDTH=" + support::cpp11::to_string(mult_transpose1xW_width));