COMPMID-1836: Remove CLGEMMTranspose1xWKernel and replace with CLGEMMReshapeRHSMatrixKernel
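
The transpose1xW reshape is now produced by the generic
CLGEMMReshapeRHSMatrixKernel through an equivalent GEMMRHSMatrixInfo
configuration. A minimal sketch of the mapping, using the values set
throughout this patch (b, _tmp_b and mult_transpose1xW_width are the
names used in CLGEMM.cpp below):

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0         = 16 / b->info()->element_size(); // 16 bytes per block, e.g. n0 = 4 for F32
    rhs_info.k0         = 1;                              // blocks are a single row high, as in the 1xW layout
    rhs_info.h0         = mult_transpose1xW_width;        // same multiplier as the old transpose1xW width
    rhs_info.interleave = false;
    rhs_info.transpose  = false;
    _reshape_rhs_kernel.configure(b, &_tmp_b, rhs_info);  // replaces _transpose_kernel.configure()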

Change-Id: Ic5a4f32657a155380684dcd4b44fbb608ef40cb4
Reviewed-on: https://review.mlplatform.org/418
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 924fb1d..18ef185 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -148,7 +148,7 @@
     const GPUTarget gpu_target  = get_target_from_name(device_name);
 
-    // SW_WORKAROUND: Workaround for DDK revision r14p0.to enable cl_arm_integer_dot_product_int8
-    std::set<GPUTarget> sw_workaround_issue = {GPUTarget::G76};
+    // SW_WORKAROUND: Workaround for DDK revision r14p0 to enable cl_arm_integer_dot_product_int8
+    std::set<GPUTarget> sw_workaround_issue = { GPUTarget::G76 };
     return (device_supports_extension(device, "cl_arm_integer_dot_product_int8") || sw_workaround_issue.count(gpu_target) != 0);
 }
 
@@ -255,5 +255,4 @@
             return 1;
     }
 }
-
 } // namespace arm_compute
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
index b2fb3e0..66fafe4 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
@@ -71,11 +71,18 @@
     }
     else
     {
-        const int m                         = reshape_info.m();
-        const int n                         = reshape_info.n();
-        const int k                         = reshape_info.k();
-        const int mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
-        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+        GEMMRHSMatrixInfo rhs_info;
+        const int         m                         = reshape_info.m();
+        const int         n                         = reshape_info.n();
+        const int         k                         = reshape_info.k();
+        const int         mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
+        const int         mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
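+        // rhs_info describes the transpose1xW-equivalent layout of the reshaped input1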
+        rhs_info.n0                                 = 16 / input1->element_size();
+        rhs_info.k0                                 = 1;
+        rhs_info.h0                                 = mult_transpose1xW_width;
+        rhs_info.interleave                         = false;
+        rhs_info.transpose                          = false;
 
         TensorShape tensor_shape0{ input0->tensor_shape() };
         tensor_shape0.set(0, k);
@@ -89,7 +96,7 @@
         const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
 
         const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_interleaved_shape(tensor_info0, mult_interleave4x4_height));
-        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(tensor_info1, mult_transpose1xW_width));
+        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
 
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index c9ed776..69455cf 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -66,11 +66,18 @@
     }
     else
     {
-        const int m                         = reshape_info.m();
-        const int n                         = reshape_info.n();
-        const int k                         = reshape_info.k();
-        const int mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
-        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+        GEMMRHSMatrixInfo rhs_info;
+        const int         m                         = reshape_info.m();
+        const int         n                         = reshape_info.n();
+        const int         k                         = reshape_info.k();
+        const int         mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
+        const int         mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
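+        // rhs_info describes the transpose1xW-equivalent layout of the reshaped input1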
+        rhs_info.n0                                 = 16 / input1->element_size();
+        rhs_info.k0                                 = 1;
+        rhs_info.h0                                 = mult_transpose1xW_width;
+        rhs_info.interleave                         = false;
+        rhs_info.transpose                          = false;
 
         TensorShape tensor_shape0{ input0->tensor_shape() };
         tensor_shape0.set(0, k);
@@ -84,7 +91,7 @@
         const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
 
         const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_interleaved_shape(tensor_info0, mult_interleave4x4_height));
-        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(tensor_info1, mult_transpose1xW_width));
+        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
 
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index d0db876..9048b85 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -118,7 +118,6 @@
 CLGEMM::CLGEMM(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)),
       _interleave_kernel(),
-      _transpose_kernel(),
       _mm_kernel(),
       _ma_kernel(),
       _reshape_lhs_kernel(),
@@ -174,13 +173,19 @@
         mult_transpose1xW_width   = 4;
         mult_interleave4x4_height = 2;
     }
+    GEMMRHSMatrixInfo rhs_info;
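+    // Set rhs_info to reproduce the transpose1xW layout (overridden for the G76 path below)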
+    rhs_info.n0         = 16 / b->info()->element_size();
+    rhs_info.k0         = 1;
+    rhs_info.h0         = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose  = false;
 
     // Check if we need to reshape the matrix A and matrix B
     _is_interleaved_transposed = is_interleaved_transposed(m, n, k, a->info()->data_type(), _reshape_b_only_on_first_run, gpu_target);
 
     // Check if we can run the new reshaped GEMM
     _is_G76_path = (gpu_target == GPUTarget::G76) && _is_interleaved_transposed && (data_type == DataType::F32);
-    ;
 
     // if _is_interleaved_transposed is set, force reinterpret_input_as_3d to be false as the output of CLGEMMInterleaveKernel will be 2D
     if(_is_interleaved_transposed)
@@ -201,7 +206,6 @@
         if(_is_G76_path)
         {
             GEMMLHSMatrixInfo lhs_info;
-            GEMMRHSMatrixInfo rhs_info;
 
             // Pick up the GEMM configuration based on M,N and K
             select_gemm_configuration(m, n, lhs_info, rhs_info);
@@ -219,7 +223,7 @@
             _interleave_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d());
 
             // Configure transpose kernel
-            _transpose_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
+            _reshape_rhs_kernel.configure(b, &_tmp_b, rhs_info);
         }
     }
 
@@ -286,6 +290,14 @@
         mult_interleave4x4_height = 2;
     }
 
+    GEMMRHSMatrixInfo rhs_info;
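+    // Set rhs_info to reproduce the transpose1xW layout (overridden for the G76 path below)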
+    rhs_info.n0         = 16 / b->element_size();
+    rhs_info.k0         = 1;
+    rhs_info.h0         = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose  = false;
+
     // Check if we need to reshape the matrix A and matrix B
     const bool run_interleave_transpose = is_interleaved_transposed(m, n, k, a->data_type(), reshape_b_only_on_first_run, gpu_target);
 
@@ -308,7 +320,6 @@
         if(is_G76_path)
         {
             GEMMLHSMatrixInfo lhs_info;
-            GEMMRHSMatrixInfo rhs_info;
 
             // Pick up the GEMM configuration based on M,N and K
             select_gemm_configuration(m, n, lhs_info, rhs_info);
@@ -328,10 +339,9 @@
             // Validate interleave kernel
             auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
             ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &tmp_a_info, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d()));
-
             // Validate transpose kernel
-            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &tmp_b_info, mult_transpose1xW_width));
+            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
         }
     }
 
@@ -371,14 +381,7 @@
         if(!_reshape_b_only_on_first_run)
         {
             // Run transpose kernel
-            if(_is_G76_path)
-            {
-                CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
-            }
-            else
-            {
-                CLScheduler::get().enqueue(_transpose_kernel, false);
-            }
+            CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
         }
     }
 
@@ -409,14 +412,7 @@
         {
             // Run transpose kernel and mark original weights tensor as unused
             _tmp_b.allocator()->allocate();
-            if(_is_G76_path)
-            {
-                CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
-            }
-            else
-            {
-                CLScheduler::get().enqueue(_transpose_kernel, false);
-            }
+            CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
             _original_b->mark_as_unused();
         }
         CLScheduler::get().queue().finish();
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 2d4d231..cf20bc6 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -108,6 +108,7 @@
 
     const ICLTensor *matrix_a = a;
     const ICLTensor *matrix_b = b;
+    GEMMRHSMatrixInfo rhs_info;
 
     // Arguments used by GEMMReshapeInfo
     // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo
@@ -120,6 +121,12 @@
     const int     depth_output_gemm3d       = gemm_info.depth_output_gemm3d();
     constexpr int mult_transpose1xW_width   = 1;
     constexpr int mult_interleave4x4_height = 1;
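+    // Set rhs_info to reproduce the transpose1xW layout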
+    rhs_info.n0                             = 16 / b->info()->element_size();
+    rhs_info.k0                             = 1;
+    rhs_info.h0                             = mult_transpose1xW_width;
+    rhs_info.interleave                     = false;
+    rhs_info.transpose                      = false;
 
     // Check if we need to reshape the matrix A and matrix B
     _is_interleaved_transposed = is_interleaved_transposed(m, n, k, _reshape_b_only_on_first_run, gpu_target);
@@ -142,7 +149,7 @@
         _mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d(), unroll_block);
 
         // Configure transpose kernel
-        _mtx_b_reshape_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
+        _mtx_b_reshape_kernel.configure(b, &_tmp_b, rhs_info);
     }
 
     // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
@@ -233,8 +240,9 @@
     const ITensorInfo *matrix_a_info = a;
     const ITensorInfo *matrix_b_info = b;
 
-    TensorInfo tmp_a_info{};
-    TensorInfo tmp_b_info{};
+    TensorInfo        tmp_a_info{};
+    TensorInfo        tmp_b_info{};
+    GEMMRHSMatrixInfo rhs_info;
 
     bool          reinterpret_input_as_3d   = gemm_info.reinterpret_input_as_3d();
     const int     m                         = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
@@ -243,6 +251,12 @@
     constexpr int mult_transpose1xW_width   = 1;
     constexpr int mult_interleave4x4_height = 1;
     const int     depth_output_gemm3d       = gemm_info.depth_output_gemm3d();
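+    // rhs_info must match the values used in configure()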
+    rhs_info.n0                             = 16 / b->element_size();
+    rhs_info.k0                             = 1;
+    rhs_info.h0                             = mult_transpose1xW_width;
+    rhs_info.interleave                     = false;
+    rhs_info.transpose                      = false;
 
     bool reshape_matrices = is_interleaved_transposed(m, n, k, gemm_info.reshape_b_only_on_first_run(), CLScheduler::get().target());
 
@@ -264,8 +278,8 @@
         ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &tmp_a_info, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d()));
 
         // Validate transpose kernel
-        auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &tmp_b_info, mult_transpose1xW_width));
+        auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
     }
 
     TensorInfo info_vector_sum_col, info_vector_sum_row;