Port Arm(R) Neon(TM) Quantization to new API

Partially resolves: COMPMID-4193

Change-Id: I91dc964d4308687e76127c305a6bedca796f8ba0
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5246
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 8b0532b..54ec76b 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -24,26 +24,38 @@
 #ifndef ARM_COMPUTE_NEQUANTIZATIONLAYER_H
 #define ARM_COMPUTE_NEQUANTIZATIONLAYER_H
 
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
-
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IRuntimeContext.h"
+
+#include <memory>
 
 namespace arm_compute
 {
 class ITensor;
 class ITensorInfo;
 
-/** Basic function to simulate a quantization layer. This function calls the following Neon kernels:
+/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) implementation layers:
  *
  *
- * -# @ref NEQuantizationLayerKernel
+ * -# @ref cpu::CpuQuantization
  *
  */
-class NEQuantizationLayer : public INESimpleFunctionNoBorder
+class NEQuantizationLayer : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEQuantizationLayer();
+    /** Default Destructor */
+    ~NEQuantizationLayer();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEQuantizationLayer(const NEQuantizationLayer &) = delete;
+    /** Default move constructor */
+    NEQuantizationLayer(NEQuantizationLayer &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEQuantizationLayer &operator=(const NEQuantizationLayer &) = delete;
+    /** Default move assignment operator */
+    NEQuantizationLayer &operator=(NEQuantizationLayer &&) = default;
     /** Set the input and output tensors.
      *
      * @param[in]  input  Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
@@ -58,6 +70,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEQUANTIZATIONLAYER_H */