Rename [Cl|Cpu]GemmConvolution to [Cl|Cpu]GemmConv2d

Rename the GEMM-based convolution operators to accommodate new
operators with higher convolution dimensionality.
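
Only the internal operator names change; the public CLGEMMConvolutionLayer and
NEGEMMConvolutionLayer interfaces are untouched. As an illustrative sketch only
(mirroring the call sites updated in NEGEMMConvolutionLayer.cpp below, with
tensor setup and memory management omitted), a CPU call site now reads:

    #include "src/runtime/cpu/operators/CpuGemmConv2d.h"

    // Inside namespace arm_compute, as in the function-level code below.
    // Previously: std::make_unique<cpu::CpuGemmConvolution>()
    auto op = std::make_unique<cpu::CpuGemmConv2d>();
    op->configure(input->info(), weights->info(),
                  (biases != nullptr ? biases->info() : nullptr), output->info(),
                  conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);

    // Static validation follows the same rename:
    Status status = cpu::CpuGemmConv2d::validate(input->info(), weights->info(),
                                                 (biases != nullptr ? biases->info() : nullptr),
                                                 output->info(), conv_info, weights_info,
                                                 dilation, act_info, enable_fast_math, num_groups);

The OpenCL side is analogous, with opencl::ClGemmConv2d replacing
opencl::ClGemmConvolution.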

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Id2f2cf11404221f0e87baa0e5d08ad5d63eaf78e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6113
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 75ca77d..563dbd4 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -32,7 +32,7 @@
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "src/core/helpers/MemoryHelpers.h"
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
 #include "support/Cast.h"
 
 #include <cmath>
@@ -47,15 +47,15 @@
 
 struct CLGEMMConvolutionLayer::Impl
 {
-    const ITensor                             *weights{ nullptr };
-    std::unique_ptr<opencl::ClGemmConvolution> op{ nullptr };
-    ITensorPack                                run_pack{};
-    ITensorPack                                prep_pack{};
-    MemoryGroup                                memory_group{};
-    IWeightsManager                           *weights_manager{ nullptr };
-    MemoryRequirements                         aux_mem_req{};
-    WorkspaceData<CLTensor>                    workspace_tensors{};
-    bool                                       is_prepared{ false };
+    const ITensor                        *weights{ nullptr };
+    std::unique_ptr<opencl::ClGemmConv2d> op{ nullptr };
+    ITensorPack                           run_pack{};
+    ITensorPack                           prep_pack{};
+    MemoryGroup                           memory_group{};
+    IWeightsManager                      *weights_manager{ nullptr };
+    MemoryRequirements                    aux_mem_req{};
+    WorkspaceData<CLTensor>               workspace_tensors{};
+    bool                                  is_prepared{ false };
 };
 
 CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
@@ -79,7 +79,7 @@
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
     _impl->weights               = weights;
-    _impl->op                    = std::make_unique<opencl::ClGemmConvolution>();
+    _impl->op                    = std::make_unique<opencl::ClGemmConv2d>();
     const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
     _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv2d_info, weights_info);
 
@@ -103,7 +103,7 @@
                                         const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
 {
     const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
-    return opencl::ClGemmConvolution::validate(input, weights, biases, output, conv2d_info, weights_info);
+    return opencl::ClGemmConv2d::validate(input, weights, biases, output, conv2d_info, weights_info);
 }
 
 void CLGEMMConvolutionLayer::run()
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 6e25b42..8bd1119 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -30,7 +30,7 @@
 #include "src/core/helpers/MemoryHelpers.h"
 #include "src/runtime/cpu/operators/CpuConv2d.h"
 #include "src/runtime/cpu/operators/CpuDirectConv2d.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
 #include "src/runtime/cpu/operators/CpuGemmDirectConv2d.h"
 #include "src/runtime/cpu/operators/CpuWinogradConv2d.h"
 
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index c32584e..47ab168 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -28,7 +28,7 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/Tensor.h"
 #include "src/core/helpers/MemoryHelpers.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
 
 using namespace arm_compute::experimental;
 
@@ -36,14 +36,14 @@
 {
 struct NEGEMMConvolutionLayer::Impl
 {
-    const ITensor                           *weights{ nullptr };
-    std::unique_ptr<cpu::CpuGemmConvolution> op{ nullptr };
-    ITensorPack                              run_pack{};
-    MemoryGroup                              memory_group{};
-    IWeightsManager                         *weights_manager{ nullptr };
-    MemoryRequirements                       aux_mem_req{};
-    WorkspaceData<Tensor>                    workspace_tensors{};
-    bool                                     is_prepared{ false };
+    const ITensor                      *weights{ nullptr };
+    std::unique_ptr<cpu::CpuGemmConv2d> op{ nullptr };
+    ITensorPack                         run_pack{};
+    MemoryGroup                         memory_group{};
+    IWeightsManager                    *weights_manager{ nullptr };
+    MemoryRequirements                  aux_mem_req{};
+    WorkspaceData<Tensor>               workspace_tensors{};
+    bool                                is_prepared{ false };
 };
 
 NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager, IWeightsManager *weights_manager)
@@ -59,7 +59,7 @@
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
     _impl->weights = weights;
-    _impl->op      = std::make_unique<cpu::CpuGemmConvolution>();
+    _impl->op      = std::make_unique<cpu::CpuGemmConv2d>();
     _impl->op->configure(input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
 
     _impl->run_pack =
@@ -76,7 +76,7 @@
 Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                         const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
 {
-    return cpu::CpuGemmConvolution::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
+    return cpu::CpuGemmConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
 }
 
 void NEGEMMConvolutionLayer::run()
diff --git a/src/runtime/cpu/operators/CpuConv2d.cpp b/src/runtime/cpu/operators/CpuConv2d.cpp
index 809663a..cff9238 100644
--- a/src/runtime/cpu/operators/CpuConv2d.cpp
+++ b/src/runtime/cpu/operators/CpuConv2d.cpp
@@ -26,7 +26,7 @@
 #include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
 #include "src/runtime/cpu/operators/CpuDirectConv2d.h"
 #include "src/runtime/cpu/operators/CpuGemm.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
 #include "src/runtime/cpu/operators/CpuGemmDirectConv2d.h"
 #include "src/runtime/cpu/operators/CpuWinogradConv2d.h"
 
@@ -62,7 +62,7 @@
         }
         case ConvolutionMethod::GEMM:
         {
-            auto f = std::make_unique<CpuGemmConvolution>();
+            auto f = std::make_unique<CpuGemmConv2d>();
             f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math);
             _function = std::move(f);
             break;
@@ -101,7 +101,7 @@
             ARM_COMPUTE_RETURN_ON_ERROR(CpuWinogradConv2d::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
             break;
         case ConvolutionMethod::GEMM:
-            ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmConvolution::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math));
+            ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math));
             break;
         case ConvolutionMethod::GEMM_CONV2D:
             ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmDirectConv2d::validate(input, weights, biases, output, info));
diff --git a/src/runtime/cpu/operators/CpuGemmConvolution.cpp b/src/runtime/cpu/operators/CpuGemmConv2d.cpp
similarity index 91%
rename from src/runtime/cpu/operators/CpuGemmConvolution.cpp
rename to src/runtime/cpu/operators/CpuGemmConv2d.cpp
index 81d656c..a81dd8a 100644
--- a/src/runtime/cpu/operators/CpuGemmConvolution.cpp
+++ b/src/runtime/cpu/operators/CpuGemmConv2d.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
 
 #include "arm_compute/core/Size2D.h"
 #include "arm_compute/core/TensorInfo.h"
@@ -51,15 +51,15 @@
 {
 namespace cpu
 {
-CpuGemmConvolution::CpuGemmConvolution()
+CpuGemmConv2d::CpuGemmConv2d()
     : _weights_reshape_kernel(nullptr), _im2col_kernel(), _mm_gemm(), _mm_gemmlowp(), _col2im_kernel(), _reshape_kernel(), _im2col_output(), _weights_reshaped(), _gemm_output(), _gemm_output_3d(),
       _data_layout(DataLayout::NCHW), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
 {
 }
-CpuGemmConvolution::~CpuGemmConvolution() = default;
+CpuGemmConv2d::~CpuGemmConv2d() = default;
 
-void CpuGemmConvolution::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act_info,
-                                      bool enable_fast_math, int gemm_3d_depth)
+void CpuGemmConv2d::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act_info,
+                                 bool enable_fast_math, int gemm_3d_depth)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
     ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, act_info, enable_fast_math, gemm_3d_depth, _skip_im2col));
@@ -137,8 +137,8 @@
     }
 }
 
-Status CpuGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
-                                       const ActivationLayerInfo &act_info, bool enable_fast_math, int gemm_3d_depth, bool skip_im2col)
+Status CpuGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+                                  const ActivationLayerInfo &act_info, bool enable_fast_math, int gemm_3d_depth, bool skip_im2col)
 {
     const DataType data_type             = src->data_type();
     const bool     is_quantized          = is_data_type_quantized_asymmetric(data_type);
@@ -197,7 +197,7 @@
     }
 }
 
-Status CpuGemmConvolution::validate_gemm3d(const ITensorInfo *input_info, const ITensorInfo *weights_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
+Status CpuGemmConv2d::validate_gemm3d(const ITensorInfo *input_info, const ITensorInfo *weights_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
 {
     const DataType     data_type = input_info->data_type();
     const unsigned int mult_y    = skip_im2col ? 1U : gemm_3d_depth;
@@ -211,21 +211,21 @@
     return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, false, gemm_3d_depth, skip_im2col);
 }
 
-void CpuGemmConvolution::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
-                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+void CpuGemmConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+                              const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
     ARM_COMPUTE_UNUSED(num_groups, weights_info);
-    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmConvolution::validate(src,
-                                                            weights,
-                                                            biases,
-                                                            dst,
-                                                            conv_info,
-                                                            weights_info,
-                                                            dilation,
-                                                            act_info,
-                                                            enable_fast_math,
-                                                            num_groups));
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmConv2d::validate(src,
+                                                       weights,
+                                                       biases,
+                                                       dst,
+                                                       conv_info,
+                                                       weights_info,
+                                                       dilation,
+                                                       act_info,
+                                                       enable_fast_math,
+                                                       num_groups));
 
     const DataType   data_type   = src->data_type();
     const DataLayout data_layout = src->data_layout();
@@ -353,8 +353,8 @@
     _aux_mem[GemmOutput]      = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
 }
 
-Status CpuGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
-                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+Status CpuGemmConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+                               const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
@@ -489,7 +489,7 @@
     return Status{};
 }
 
-void CpuGemmConvolution::run(ITensorPack &tensors)
+void CpuGemmConv2d::run(ITensorPack &tensors)
 {
     prepare(tensors);
 
@@ -581,7 +581,7 @@
     }
 }
 
-void CpuGemmConvolution::prepare(ITensorPack &tensors)
+void CpuGemmConv2d::prepare(ITensorPack &tensors)
 {
     if(!_is_prepared)
     {
@@ -604,7 +604,7 @@
         _is_prepared = true;
     }
 }
-experimental::MemoryRequirements CpuGemmConvolution::workspace() const
+experimental::MemoryRequirements CpuGemmConv2d::workspace() const
 {
     return _aux_mem;
 }
diff --git a/src/runtime/cpu/operators/CpuGemmConvolution.h b/src/runtime/cpu/operators/CpuGemmConv2d.h
similarity index 95%
rename from src/runtime/cpu/operators/CpuGemmConvolution.h
rename to src/runtime/cpu/operators/CpuGemmConv2d.h
index 7755bbe..5292565 100644
--- a/src/runtime/cpu/operators/CpuGemmConvolution.h
+++ b/src/runtime/cpu/operators/CpuGemmConv2d.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CPU_GEMMCONVOLUTION_H
-#define ARM_COMPUTE_CPU_GEMMCONVOLUTION_H
+#ifndef ARM_COMPUTE_CPU_GEMM_CONV2D_H
+#define ARM_COMPUTE_CPU_GEMM_CONV2D_H
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Types.h"
@@ -55,21 +55,21 @@
  * -# @ref kernels::CpuWeightsReshapeKernel
  *
  */
-class CpuGemmConvolution : public ICpuOperator
+class CpuGemmConv2d : public ICpuOperator
 {
 public:
     /** Constructor */
-    CpuGemmConvolution();
+    CpuGemmConv2d();
     /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CpuGemmConvolution(const CpuGemmConvolution &) = delete;
+    CpuGemmConv2d(const CpuGemmConv2d &) = delete;
     /** Prevent instances of this class from being moved (As this class contains non movable objects) */
-    CpuGemmConvolution(CpuGemmConvolution &&) = delete;
+    CpuGemmConv2d(CpuGemmConv2d &&) = delete;
     /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CpuGemmConvolution &operator=(const CpuGemmConvolution &) = delete;
+    CpuGemmConv2d &operator=(const CpuGemmConv2d &) = delete;
     /** Prevent instances of this class from being moved (As this class contains non movable objects) */
-    CpuGemmConvolution &operator=(CpuGemmConvolution &&) = delete;
+    CpuGemmConv2d &operator=(CpuGemmConv2d &&) = delete;
     /** Destructor */
-    ~CpuGemmConvolution();
+    ~CpuGemmConv2d();
     /** Set the input and output tensors.
      *
      * Valid data layouts:
@@ -200,4 +200,4 @@
 };
 } // namespace cpu
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_GEMMCONVOLUTION_H */
+#endif /* ARM_COMPUTE_CPU_GEMM_CONV2D_H */
diff --git a/src/runtime/gpu/cl/operators/ClConv2d.cpp b/src/runtime/gpu/cl/operators/ClConv2d.cpp
index 2f4d673..0cb3a96 100644
--- a/src/runtime/gpu/cl/operators/ClConv2d.cpp
+++ b/src/runtime/gpu/cl/operators/ClConv2d.cpp
@@ -31,7 +31,7 @@
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
 #include "src/runtime/gpu/cl/operators/ClDirectConv2d.h"
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
 #include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
 
 #include <memory>
@@ -104,7 +104,7 @@
         }
         case ConvolutionMethod::GEMM:
         {
-            auto f = std::make_unique<ClGemmConvolution>();
+            auto f = std::make_unique<ClGemmConv2d>();
             f->configure(compile_context, src, weights, biases, dst, conv2d_info, weights_info);
             _operator = std::move(f);
             break;
@@ -143,7 +143,7 @@
         case ConvolutionMethod::GEMM:
         {
             // Validate gemm-based convolution layer
-            ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConvolution::validate(src, weights, biases, dst, conv2d_info, weights_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConv2d::validate(src, weights, biases, dst, conv2d_info, weights_info));
             break;
         }
         default:
diff --git a/src/runtime/gpu/cl/operators/ClConv2d.h b/src/runtime/gpu/cl/operators/ClConv2d.h
index 0888c2f..cdf3b7d 100644
--- a/src/runtime/gpu/cl/operators/ClConv2d.h
+++ b/src/runtime/gpu/cl/operators/ClConv2d.h
@@ -36,7 +36,7 @@
 {
 /** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
  *
- * -# @ref opencl::ClGemmConvolution
+ * -# @ref opencl::ClGemmConv2d
  * -# @ref opencl::ClWinogradConv2d
  * -# @ref opencl::ClDirectConv2d
  * -# @ref CLFFTConvolutionLayer
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp b/src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
similarity index 93%
rename from src/runtime/gpu/cl/operators/ClGemmConvolution.cpp
rename to src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
index 1926cbb..8c796e0 100644
--- a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp
+++ b/src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
 
 #include "arm_compute/core/CL/ICLTensor.h"
 #include "arm_compute/core/PixelValue.h"
@@ -50,16 +50,16 @@
 using namespace utils::cast;
 namespace opencl
 {
-ClGemmConvolution::ClGemmConvolution()
+ClGemmConv2d::ClGemmConv2d()
     : _weights_reshape_kernel(nullptr), _im2col_kernel(nullptr), _mm_gemm(nullptr), _mm_gemmlowp(nullptr), _col2im_kernel(nullptr), _activation_kernel(nullptr), _im2col_output(), _weights_reshaped(),
       _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _append_bias(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
 {
 }
-ClGemmConvolution::~ClGemmConvolution() = default;
+ClGemmConv2d::~ClGemmConv2d() = default;
 
-void ClGemmConvolution::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
-                                     const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
-                                     int gemm_3d_depth, const ActivationLayerInfo &act_info)
+void ClGemmConv2d::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+                                const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+                                int gemm_3d_depth, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
     ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));
@@ -112,8 +112,8 @@
     }
 }
 
-Status ClGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
-                                      const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
+Status ClGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+                                 const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
 {
     const bool is_quantized = is_data_type_quantized_asymmetric(src->data_type());
 
@@ -151,14 +151,14 @@
     }
 }
 
-void ClGemmConvolution::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
-                                  const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
+void ClGemmConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+                             const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
 
-    ARM_COMPUTE_ERROR_THROW_ON(ClGemmConvolution::validate(src, weights, biases, dst,
-                                                           conv2d_info,
-                                                           weights_info));
+    ARM_COMPUTE_ERROR_THROW_ON(ClGemmConv2d::validate(src, weights, biases, dst,
+                                                      conv2d_info,
+                                                      weights_info));
 
     const DataType   data_type   = src->data_type();
     const DataLayout data_layout = src->data_layout();
@@ -334,8 +334,8 @@
     _aux_mem[GemmOutput]      = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
 }
 
-Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
-                                   const WeightsInfo &weights_info)
+Status ClGemmConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
+                              const WeightsInfo &weights_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
@@ -371,8 +371,8 @@
     const bool         is_quantized       = is_data_type_quantized_asymmetric(data_type);
     const bool         skip_im2col        = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv2d_info.conv_info.stride().first == 1
                                              && conv2d_info.conv_info.stride().second == 1);
-    const bool         skip_col2im        = data_layout == DataLayout::NHWC;
-    bool               fuse_activation    = true;
+    const bool skip_col2im     = data_layout == DataLayout::NHWC;
+    bool       fuse_activation = true;
 
     ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * conv2d_info.num_groups) != src->dimension(idx_channel));
     ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
@@ -521,7 +521,7 @@
     return Status{};
 }
 
-void ClGemmConvolution::run(ITensorPack &tensors)
+void ClGemmConv2d::run(ITensorPack &tensors)
 {
     prepare(tensors);
 
@@ -593,7 +593,7 @@
     }
 }
 
-void ClGemmConvolution::prepare(ITensorPack &tensors)
+void ClGemmConv2d::prepare(ITensorPack &tensors)
 {
     if(!_is_prepared)
     {
@@ -620,7 +620,7 @@
         _is_prepared = true;
     }
 }
-experimental::MemoryRequirements ClGemmConvolution::workspace() const
+experimental::MemoryRequirements ClGemmConv2d::workspace() const
 {
     return _aux_mem;
 }
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.h b/src/runtime/gpu/cl/operators/ClGemmConv2d.h
similarity index 94%
rename from src/runtime/gpu/cl/operators/ClGemmConvolution.h
rename to src/runtime/gpu/cl/operators/ClGemmConv2d.h
index 444516e..e16d029 100644
--- a/src/runtime/gpu/cl/operators/ClGemmConvolution.h
+++ b/src/runtime/gpu/cl/operators/ClGemmConv2d.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CL_GEMMCONVOLUTION_H
-#define ARM_COMPUTE_CL_GEMMCONVOLUTION_H
+#ifndef ARM_COMPUTE_CL_GEMM_CONV2D_H
+#define ARM_COMPUTE_CL_GEMM_CONV2D_H
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Types.h"
@@ -55,21 +55,21 @@
  * -# @ref opencl::kernels::ClCol2ImKernel (if NCHW data layout)
  * -# @ref opencl::kernels::ClActivationKernel
  */
-class ClGemmConvolution : public IClOperator
+class ClGemmConv2d : public IClOperator
 {
 public:
     /** Constructor */
-    ClGemmConvolution();
+    ClGemmConv2d();
     /** Prevent instances of this class from being copied (As this class contains pointers) */
-    ClGemmConvolution(const ClGemmConvolution &) = delete;
+    ClGemmConv2d(const ClGemmConv2d &) = delete;
     /** Default move constructor */
-    ClGemmConvolution(ClGemmConvolution &&) = default;
+    ClGemmConv2d(ClGemmConv2d &&) = default;
     /** Prevent instances of this class from being copied (As this class contains pointers) */
-    ClGemmConvolution &operator=(const ClGemmConvolution &) = delete;
+    ClGemmConv2d &operator=(const ClGemmConv2d &) = delete;
     /** Default move assignment operator */
-    ClGemmConvolution &operator=(ClGemmConvolution &&) = default;
+    ClGemmConv2d &operator=(ClGemmConv2d &&) = default;
     /**Default destructor */
-    ~ClGemmConvolution();
+    ~ClGemmConv2d();
     /** Set the input and output tensors.
      *
      * Valid data layouts:
@@ -182,4 +182,4 @@
 };
 } // namespace opencl
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMMCONVOLUTION_H */
+#endif /* ARM_COMPUTE_CL_GEMM_CONV2D_H */