Rename Quantization/Dequantization kernels/operators to imperative mood

Renames the following kernels/functions (a short usage sketch with the new names follows the list):
 - [Cl|Cpu]DequantizationKernel -> [Cl|Cpu]DequantizeKernel
 - [Cl|Cpu]Dequantization -> [Cl|Cpu]Dequantize
 - [Cl|Cpu]QuantizationKernel -> [Cl|Cpu]QuantizeKernel
 - [Cl|Cpu]Quantization -> [Cl|Cpu]Quantize

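For code that uses these internal kernels directly, only the class and header
names change; the interfaces are otherwise unchanged apart from
CpuQuantizeKernel::configure() now taking a const source ITensorInfo. Below is
a minimal, illustrative sketch of the new Cpu naming (the helper function,
shapes, data types and quantization parameters are arbitrary examples, and the
src/ headers are assumed to be on the include path):

  #include "arm_compute/core/QuantizationInfo.h"
  #include "arm_compute/core/TensorInfo.h"
  #include "arm_compute/core/TensorShape.h"
  #include "src/core/cpu/kernels/CpuQuantizeKernel.h" // was CpuQuantizationKernel.h

  using namespace arm_compute;

  // Hypothetical helper, for illustration only: validates and configures a
  // quantize kernel for an F32 -> QASYMM8 case.
  void configure_quantize_kernel_example()
  {
      TensorInfo src(TensorShape(32U, 32U, 3U), 1, DataType::F32);
      TensorInfo dst(TensorShape(32U, 32U, 3U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10));

      // Only the type name changes: CpuQuantizationKernel -> CpuQuantizeKernel.
      cpu::kernels::CpuQuantizeKernel quantize;
      if(bool(cpu::kernels::CpuQuantizeKernel::validate(&src, &dst)))
      {
          quantize.configure(&src, &dst);
      }
  }
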
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ic3c5eb3b7fe28f807294d159830eef99c2dd6219
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5566
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/cpu/kernels/CpuDequantizationKernel.cpp b/src/core/cpu/kernels/CpuDequantizeKernel.cpp
similarity index 96%
rename from src/core/cpu/kernels/CpuDequantizationKernel.cpp
rename to src/core/cpu/kernels/CpuDequantizeKernel.cpp
index 2aa9fb9..42b5439 100644
--- a/src/core/cpu/kernels/CpuDequantizationKernel.cpp
+++ b/src/core/cpu/kernels/CpuDequantizeKernel.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/cpu/kernels/CpuDequantizationKernel.h"
+#include "src/core/cpu/kernels/CpuDequantizeKernel.h"
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
@@ -349,7 +349,7 @@
 }
 } // namespace
 
-void CpuDequantizationKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+void CpuDequantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
 
@@ -362,13 +362,13 @@
     ICpuKernel::configure(win);
 }
 
-Status CpuDequantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
     return Status{};
 }
 
-void CpuDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+void CpuDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
@@ -391,9 +391,9 @@
             ARM_COMPUTE_ERROR("Unsupported data type.");
     }
 }
-const char *CpuDequantizationKernel::name() const
+const char *CpuDequantizeKernel::name() const
 {
-    return "CpuDequantizationKernel";
+    return "CpuDequantizeKernel";
 }
 } // namespace kernels
 } // namespace cpu
diff --git a/src/core/cpu/kernels/CpuDequantizationKernel.h b/src/core/cpu/kernels/CpuDequantizeKernel.h
similarity index 79%
rename from src/core/cpu/kernels/CpuDequantizationKernel.h
rename to src/core/cpu/kernels/CpuDequantizeKernel.h
index 8ac8070..798f32c 100644
--- a/src/core/cpu/kernels/CpuDequantizationKernel.h
+++ b/src/core/cpu/kernels/CpuDequantizeKernel.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H
-#define ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H
+#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H
 
 #include "src/core/common/Macros.h"
 #include "src/core/cpu/ICpuKernel.h"
@@ -34,22 +34,21 @@
 namespace kernels
 {
 /** Interface for the dequantization layer kernel. */
-class CpuDequantizationKernel : public ICpuKernel
+class CpuDequantizeKernel : public ICpuKernel
 {
 public:
     /** Default constructor */
-    CpuDequantizationKernel() = default;
-    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizationKernel);
+    CpuDequantizeKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizeKernel);
     /** Set input, output tensors.
      *
      * @param[in]  src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
      * @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32.
      */
     void configure(const ITensorInfo *src, ITensorInfo *dst);
-    /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantizationKernel
+    /** Static function to check if given info will lead to a valid configuration
      *
-     * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
-     * @param[in] dst Destination tensor info. Data types supported: F16/F32.
+     * Similar to @ref CpuDequantizeKernel::configure()
      *
      * @return a status
      */
@@ -62,4 +61,4 @@
 } // namespace kernels
 } // namespace cpu
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H */
diff --git a/src/core/cpu/kernels/CpuQuantizationKernel.cpp b/src/core/cpu/kernels/CpuQuantizeKernel.cpp
similarity index 81%
rename from src/core/cpu/kernels/CpuQuantizationKernel.cpp
rename to src/core/cpu/kernels/CpuQuantizeKernel.cpp
index 9b1e017..8ca81e8 100644
--- a/src/core/cpu/kernels/CpuQuantizationKernel.cpp
+++ b/src/core/cpu/kernels/CpuQuantizeKernel.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/cpu/kernels/CpuQuantizationKernel.h"
+#include "src/core/cpu/kernels/CpuQuantizeKernel.h"
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
@@ -108,34 +108,29 @@
 
 } // namespace
 
-CpuQuantizationKernel::CpuQuantizationKernel()
-    : _func(nullptr)
-{
-}
-
-void CpuQuantizationKernel::configure(ITensorInfo *src, ITensorInfo *dst)
+void CpuQuantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
 
-    static const std::map<std::string, QuantizationFunctionExecutorPtr> quant_map =
+    static const std::map<std::string, QuantizeFunctionExecutorPtr> quant_map =
     {
-        { "op_QASYMM8_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<uint8_t, uint8_t> },
-        { "op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<uint8_t, int8_t> },
-        { "op_QASYMM8_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<uint8_t> },
+        { "op_QASYMM8_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, uint8_t> },
+        { "op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, int8_t> },
+        { "op_QASYMM8_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<uint8_t> },
 
-        { "op_QASYMM8_SIGNED_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<int8_t, uint8_t> },
-        { "op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<int8_t, int8_t> },
-        { "op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<int8_t> },
+        { "op_QASYMM8_SIGNED_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, uint8_t> },
+        { "op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, int8_t> },
+        { "op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<int8_t> },
 
-        { "op_F32_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<float, uint8_t> },
-        { "op_F32_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<float, int8_t> },
-        { "op_F32_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<float> },
+        { "op_F32_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float, uint8_t> },
+        { "op_F32_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float, int8_t> },
+        { "op_F32_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float> },
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        { "op_F16_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<float16_t, uint8_t> },
-        { "op_F16_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<float16_t, int8_t> },
-        { "op_F16_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<float16_t> },
+        { "op_F16_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, uint8_t> },
+        { "op_F16_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, int8_t> },
+        { "op_F16_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float16_t> },
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
     };
 
@@ -156,14 +151,14 @@
     ICpuKernel::configure(win_config);
 }
 
-Status CpuQuantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
     return Status{};
 }
 
 template <typename TIn, typename TOut>
-void CpuQuantizationKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+void CpuQuantizeKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
 {
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
@@ -206,7 +201,7 @@
 }
 
 template <typename T>
-void CpuQuantizationKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+void CpuQuantizeKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
 {
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
@@ -250,7 +245,7 @@
     input, output);
 }
 
-void CpuQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+void CpuQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
@@ -262,9 +257,9 @@
     (this->*_func)(src, dst, window);
 }
 
-const char *CpuQuantizationKernel::name() const
+const char *CpuQuantizeKernel::name() const
 {
-    return "CpuQuantizationKernel";
+    return "CpuQuantizeKernel";
 }
 } // namespace kernels
 } // namespace cpu
diff --git a/src/core/cpu/kernels/CpuQuantizationKernel.h b/src/core/cpu/kernels/CpuQuantizeKernel.h
similarity index 77%
rename from src/core/cpu/kernels/CpuQuantizationKernel.h
rename to src/core/cpu/kernels/CpuQuantizeKernel.h
index 51d9a4e..d3422d3 100644
--- a/src/core/cpu/kernels/CpuQuantizationKernel.h
+++ b/src/core/cpu/kernels/CpuQuantizeKernel.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H
-#define ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H
+#ifndef ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H
 
 #include "src/core/common/Macros.h"
 #include "src/core/cpu/ICpuKernel.h"
@@ -36,14 +36,13 @@
 /** Interface for the quantization layer kernel.
  *
  * @note The implementation supports only 3D input tensors
- *
  */
-class CpuQuantizationKernel : public ICpuKernel
+class CpuQuantizeKernel : public ICpuKernel
 {
 public:
     /** Default constructor */
-    CpuQuantizationKernel();
-    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizationKernel);
+    CpuQuantizeKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizeKernel);
     /** Set the input, output.
      *
      * @param[in]  src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
@@ -51,11 +50,10 @@
      *
      * @note Output auto initialization is not supported by this kernel
      */
-    void configure(ITensorInfo *src, ITensorInfo *dst);
-    /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantizationKernel
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+    /** Static function to check if given info will lead to a valid configuration
      *
-     * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
-     * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
+     * Similar to @ref CpuQuantizeKernel::configure()
      *
      * @return a status
      */
@@ -66,11 +64,11 @@
     const char *name() const override;
 
 private:
-    /** Common signature for all the specialised @ref NEQuantizationLayerKernel functions
+    /** Common signature for all the specialised @ref CpuQuantizeKernel functions
      *
      * @param[in] window Region on which to execute the kernel.
      */
-    using QuantizationFunctionExecutorPtr = void (CpuQuantizationKernel::*)(const ITensor *src, ITensor *dst, const Window &window);
+    using QuantizeFunctionExecutorPtr = void (CpuQuantizeKernel::*)(const ITensor *src, ITensor *dst, const Window &window);
     /** Function to apply QASYMM8 or QASYMM8_SIGNED quantization on a tensor.
      *
      * @param[in] window Region on which to execute the kernel.
@@ -84,9 +82,9 @@
     template <typename T>
     void run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window);
 
-    QuantizationFunctionExecutorPtr _func;
+    QuantizeFunctionExecutorPtr _func{ nullptr };
 };
 } // namespace kernels
 } // namespace cpu
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClDequantizationKernel.cpp b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
similarity index 92%
rename from src/core/gpu/cl/kernels/ClDequantizationKernel.cpp
rename to src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
index 6421a08..f2758b7 100644
--- a/src/core/gpu/cl/kernels/ClDequantizationKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/gpu/cl/kernels/ClDequantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h"
 
 #include "arm_compute/core/CL/CLHelpers.h"
 #include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -29,9 +29,11 @@
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
+
 #include "src/core/CL/CLValidate.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
+
 #include "support/Cast.h"
 #include "support/StringSupport.h"
 
@@ -59,11 +61,7 @@
 }
 } // namespace
 
-ClDequantizationKernel::ClDequantizationKernel()
-{
-}
-
-void ClDequantizationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClDequantizeKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
 
@@ -115,13 +113,13 @@
     ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
-Status ClDequantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
     return Status{};
 }
 
-void ClDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+void ClDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
 {
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
diff --git a/src/core/gpu/cl/kernels/ClDequantizationKernel.h b/src/core/gpu/cl/kernels/ClDequantizeKernel.h
similarity index 79%
rename from src/core/gpu/cl/kernels/ClDequantizationKernel.h
rename to src/core/gpu/cl/kernels/ClDequantizeKernel.h
index 3ccf90c..33e0164 100644
--- a/src/core/gpu/cl/kernels/ClDequantizationKernel.h
+++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.h
@@ -21,29 +21,26 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H
-#define ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H
+#ifndef ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H
 
-#include "arm_compute/core/KernelDescriptors.h"
 #include "src/core/common/Macros.h"
 #include "src/core/gpu/cl/ClCompileContext.h"
 #include "src/core/gpu/cl/IClKernel.h"
 
 namespace arm_compute
 {
-class ICLTensor;
-
 namespace opencl
 {
 namespace kernels
 {
 /** Interface for the dequantization layer kernel. */
-class ClDequantizationKernel : public IClKernel
+class ClDequantizeKernel : public IClKernel
 {
 public:
     /** Default constructor */
-    ClDequantizationKernel();
-    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizationKernel);
+    ClDequantizeKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizeKernel);
     /** Initialise the kernel's input and output
      *
      * @param[in]  compile_context The compile context to be used.
@@ -51,10 +48,9 @@
      * @param[out] dst             Destination tensor info. Data types supported: F16/F32.
      */
     void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
-    /** Static function to check if given info will lead to a valid configuration of @ref ClDequantizationKernel
+    /** Static function to check if given info will lead to a valid configuration
      *
-     * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
-     * @param[in] dst Output tensor info. Data types supported: F16/F32.
+     * Similar to @ref ClDequantizeKernel::configure()
      *
      * @return a status
      */
@@ -66,4 +62,4 @@
 } // namespace kernels
 } // namespace opencl
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H */
+#endif /* ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClQuantizationKernel.cpp b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
similarity index 94%
rename from src/core/gpu/cl/kernels/ClQuantizationKernel.cpp
rename to src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
index 9926123..48d351d 100644
--- a/src/core/gpu/cl/kernels/ClQuantizationKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h"
 
 #include "arm_compute/core/CL/CLHelpers.h"
 #include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -31,8 +31,10 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
 #include "src/core/CL/CLValidate.h"
 #include "src/core/helpers/WindowHelpers.h"
+
 #include "support/Cast.h"
 #include "support/StringSupport.h"
 
@@ -59,11 +61,7 @@
 }
 } // namespace
 
-ClQuantizationKernel::ClQuantizationKernel()
-{
-}
-
-void ClQuantizationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClQuantizeKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
 
@@ -146,13 +144,13 @@
     ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
-Status ClQuantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
     return Status{};
 }
 
-void ClQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+void ClQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
 {
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
diff --git a/src/core/gpu/cl/kernels/ClQuantizationKernel.h b/src/core/gpu/cl/kernels/ClQuantizeKernel.h
similarity index 77%
rename from src/core/gpu/cl/kernels/ClQuantizationKernel.h
rename to src/core/gpu/cl/kernels/ClQuantizeKernel.h
index 20822cf..8d37f33 100644
--- a/src/core/gpu/cl/kernels/ClQuantizationKernel.h
+++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H
-#define ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H
+#ifndef ARM_COMPUTE_CL_QUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CL_QUANTIZE_KERNEL_H
 
 #include "src/core/common/Macros.h"
 #include "src/core/gpu/cl/ClCompileContext.h"
@@ -30,8 +30,6 @@
 
 namespace arm_compute
 {
-class ICLTensor;
-
 namespace opencl
 {
 namespace kernels
@@ -40,12 +38,12 @@
  *
  * @note The implementation supports only 3D input tensors.
  */
-class ClQuantizationKernel : public IClKernel
+class ClQuantizeKernel : public IClKernel
 {
 public:
     /** Default constructor */
-    ClQuantizationKernel();
-    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizationKernel);
+    ClQuantizeKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizeKernel);
     /** Set the input, output.
      *
      * @param[in]  compile_context The compile context to be used.
@@ -54,11 +52,10 @@
      *
      * @note Output auto initialization is not supported by this kernel
      */
-    void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
-    /** Static function to check if given info will lead to a valid configuration of @ref ClQuantizationKernel
+    void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
+    /** Static function to check if given info will lead to a valid configuration
      *
-     * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
-     * @param[in] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
+     * Similar to @ref ClQuantizeKernel::configure()
      *
      * @return a status
      */
@@ -70,4 +67,4 @@
 } // namespace kernels
 } // namespace opencl
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H */
+#endif /* ARM_COMPUTE_CL_QUANTIZE_KERNEL_H */