Rename Quantization/Dequantization kernels/operators to imperative mood

Renames the following kernels/functions
 - [Cl|Cpu]DequantizationKernel -> [Cl|Cpu]DequantizeKernel
 - [Cl|Cpu]Dequantization -> [Cl|Cpu]Dequantize
 - [Cl|Cpu]QuantizationKernel -> [Cl|Cpu]QuantizeKernel
 - [Cl|Cpu]Quantization -> [Cl|Cpu]Quantize

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ic3c5eb3b7fe28f807294d159830eef99c2dd6219
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5566
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/runtime/cpu/operators/CpuDequantization.cpp b/src/runtime/cpu/operators/CpuDequantize.cpp
similarity index 77%
rename from src/runtime/cpu/operators/CpuDequantization.cpp
rename to src/runtime/cpu/operators/CpuDequantize.cpp
index 0a3f602..80a2e28 100644
--- a/src/runtime/cpu/operators/CpuDequantization.cpp
+++ b/src/runtime/cpu/operators/CpuDequantize.cpp
@@ -21,30 +21,30 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/runtime/cpu/operators/CpuDequantization.h"
+#include "src/runtime/cpu/operators/CpuDequantize.h"
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuDequantizationKernel.h"
+#include "src/core/cpu/kernels/CpuDequantizeKernel.h"
 
 namespace arm_compute
 {
 namespace cpu
 {
-void CpuDequantization::configure(const ITensorInfo *src, ITensorInfo *dst)
+void CpuDequantize::configure(const ITensorInfo *src, ITensorInfo *dst)
 {
-    auto k = std::make_unique<kernels::CpuDequantizationKernel>();
+    auto k = std::make_unique<kernels::CpuDequantizeKernel>();
     k->configure(src, dst);
     _kernel = std::move(k);
 }
 
-Status CpuDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
-    return kernels::CpuDequantizationKernel::validate(src, dst);
+    return kernels::CpuDequantizeKernel::validate(src, dst);
 }
 
-void CpuDequantization::run(ITensorPack &tensors)
+void CpuDequantize::run(ITensorPack &tensors)
 {
     ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
     prepare(tensors);
diff --git a/src/runtime/cpu/operators/CpuDequantization.h b/src/runtime/cpu/operators/CpuDequantize.h
similarity index 73%
rename from src/runtime/cpu/operators/CpuDequantization.h
rename to src/runtime/cpu/operators/CpuDequantize.h
index 22f8114..d1fb9e8 100644
--- a/src/runtime/cpu/operators/CpuDequantization.h
+++ b/src/runtime/cpu/operators/CpuDequantize.h
@@ -21,36 +21,30 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CPU_DEQUANTIZATION_H
-#define ARM_COMPUTE_CPU_DEQUANTIZATION_H
+#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_H
+#define ARM_COMPUTE_CPU_DEQUANTIZE_H
 
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "src/core/cpu/ICpuKernel.h"
 #include "src/runtime/cpu/ICpuOperator.h"
 
-#include <memory>
-
 namespace arm_compute
 {
 namespace cpu
 {
-/** Basic function to run @ref kernels::CpuDequantizationKernel that dequantizes an input tensor */
-class CpuDequantization : public ICpuOperator
+/** Basic function to run @ref kernels::CpuDequantizeKernel that dequantizes an input tensor */
+class CpuDequantize : public ICpuOperator
 {
 public:
     /** Default Constructor */
-    CpuDequantization() = default;
+    CpuDequantize() = default;
     /** Configure the kernel.
      *
      * @param[in]  src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
      * @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32.
      */
     void configure(const ITensorInfo *src, ITensorInfo *dst);
-    /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantization
+    /** Static function to check if given info will lead to a valid configuration
      *
-     * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
-     * @param[in] dst Destination tensor info. Data type supported: F16/F32.
+     * Similar to @ref CpuDequantize::configure()
      *
      * @return a status
      */
@@ -61,4 +55,4 @@
 };
 } // namespace cpu
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_DEQUANTIZATION_H */
+#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_H */
diff --git a/src/runtime/cpu/operators/CpuQuantization.cpp b/src/runtime/cpu/operators/CpuQuantize.cpp
similarity index 78%
rename from src/runtime/cpu/operators/CpuQuantization.cpp
rename to src/runtime/cpu/operators/CpuQuantize.cpp
index ede1385..5af7f63 100644
--- a/src/runtime/cpu/operators/CpuQuantization.cpp
+++ b/src/runtime/cpu/operators/CpuQuantize.cpp
@@ -22,34 +22,34 @@
  * SOFTWARE.
  */
 
-#include "src/runtime/cpu/operators/CpuQuantization.h"
+#include "src/runtime/cpu/operators/CpuQuantize.h"
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuQuantizationKernel.h"
+#include "src/core/cpu/kernels/CpuQuantizeKernel.h"
 
 namespace arm_compute
 {
 namespace cpu
 {
-Status CpuQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizationKernel::validate(src, dst));
+    ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizeKernel::validate(src, dst));
     return Status{};
 }
 
-void CpuQuantization::configure(ITensorInfo *src, ITensorInfo *dst)
+void CpuQuantize::configure(const ITensorInfo *src, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
 
     // Configure quantize kernel
-    auto k = std::make_unique<kernels::CpuQuantizationKernel>();
+    auto k = std::make_unique<kernels::CpuQuantizeKernel>();
     k->configure(src, dst);
     _kernel = std::move(k);
 }
 
-void CpuQuantization::run(ITensorPack &tensors)
+void CpuQuantize::run(ITensorPack &tensors)
 {
     ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
     NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
diff --git a/src/runtime/cpu/operators/CpuQuantization.h b/src/runtime/cpu/operators/CpuQuantize.h
similarity index 69%
rename from src/runtime/cpu/operators/CpuQuantization.h
rename to src/runtime/cpu/operators/CpuQuantize.h
index 97f0c5f..09afffd 100644
--- a/src/runtime/cpu/operators/CpuQuantization.h
+++ b/src/runtime/cpu/operators/CpuQuantize.h
@@ -21,41 +21,30 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CPU_QUANTIZATION_H
-#define ARM_COMPUTE_CPU_QUANTIZATION_H
+#ifndef ARM_COMPUTE_CPU_QUANTIZE_H
+#define ARM_COMPUTE_CPU_QUANTIZE_H
 
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "src/core/cpu/ICpuKernel.h"
 #include "src/runtime/cpu/ICpuOperator.h"
 
-#include <memory>
-
 namespace arm_compute
 {
 namespace cpu
 {
-/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) kernels:
- *
- *
- * -# @ref kernels::CpuQuantizationKernel
- *
- */
-class CpuQuantization : public ICpuOperator
+/** Basic function to run @ref kernels::CpuQuantizeKernel that quantizes an input tensor */
+class CpuQuantize : public ICpuOperator
 {
 public:
     /** Default Constructor */
-    CpuQuantization() = default;
+    CpuQuantize() = default;
     /** Set the input and output tensors.
      *
      * @param[in]  src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
      * @param[out] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16
      */
-    void configure(ITensorInfo *src, ITensorInfo *dst);
-    /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantization
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+    /** Static function to check if given info will lead to a valid configuration
      *
-     * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
-     * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16
+     * Similar to @ref CpuQuantize::configure()
      *
      * @return a status
      */
@@ -66,4 +55,4 @@
 };
 } // namespace cpu
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_QUANTIZATION_H */
+#endif /* ARM_COMPUTE_CPU_QUANTIZE_H */