Make CpuPixelWiseMultiplicationKernel stateless

Remove the experimental NEPixelWiseMultiplication and
NEComplexPixelWiseMultiplication operators from the public header; the
NEON function-level classes now run the stateless
cpu::CpuPixelWiseMultiplication and cpu::CpuComplexPixelWiseMultiplication
operators, and their move constructor/assignment are defaulted.

Resolves: COMPMID-4183

Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: Ie535c4129a6164b879fb5c4acb15f2be58ee8b6c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5325
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
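
The public function-level API documented in the header below keeps its
existing shape; only the backing kernel/operator becomes stateless. A
minimal usage sketch of that API (shapes, data type, scale and policies
are illustrative only):

    // Function-level usage; unchanged by this patch.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src0, src1, dst;
        src0.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        src1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        NEPixelWiseMultiplication mul;
        // scale = 1.f (i.e. 1/2^0), saturate on overflow, round towards zero
        mul.configure(&src0, &src1, &dst, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);

        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src0 and src1 ...

        mul.run();
        return 0;
    }
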
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index 91cf44f..6f4cce3 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,113 +26,15 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/NEON/INEOperator.h"
+
+#include <memory>
 
 namespace arm_compute
 {
 class ITensor;
 class ITensorInfo;
 
-namespace experimental
-{
-/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
-class NEPixelWiseMultiplication : public INEOperator
-{
-public:
-    /** Initialise the kernel's inputs, output and convertion policy.
-     *
-     * Valid configurations (Input1,Input2) -> Output :
-     *
-     *                                                       Support: Broadcast? Scale=1/255?
-     *   - (U8,U8)                         -> U8, S16                 N          Y
-     *   - (U8,S16)                        -> S16                     N          Y
-     *   - (S16,U8)                        -> S16                     N          Y
-     *   - (S16,S16)                       -> S16                     N          Y
-     *   - (S32,S32)                       -> S32                     Y          N
-     *   - (F16,F16)                       -> F16                     N          Y
-     *   - (F32,F32)                       -> F32                     Y          Y
-     *   - (QASYMM8,QASYMM8)               -> QASYMM8                 Y          Y
-     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED          Y          Y
-     *   - (QSYMM16,QSYMM16)               -> QSYMM16, S32            N          Y
-     *
-     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
-     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
-     *
-     * @param[in, out] input1          First input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
-     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[in, out] input2          Second input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
-     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
-     * @param[in]      scale           Scale to apply after multiplication.
-     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
-     *                                 If both @p input1, @p input2 and @p output are of datatype S32, scale cannot be 1/255
-     * @param[in]      overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype
-     * @param[in]      rounding_policy Rounding policy.
-     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
-     */
-    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
-                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
-    /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
-     *
-     * Valid configurations (Input1,Input2) -> Output :
-     *
-     *                                                       Support: Broadcast? Scale=1/255?
-     *   - (U8,U8)                         -> U8, S16                 N          Y
-     *   - (U8,S16)                        -> S16                     N          Y
-     *   - (S16,U8)                        -> S16                     N          Y
-     *   - (S16,S16)                       -> S16                     N          Y
-     *   - (S32,S32)                       -> S32                     Y          N
-     *   - (F16,F16)                       -> F16                     N          Y
-     *   - (F32,F32)                       -> F32                     Y          Y
-     *   - (QASYMM8,QASYMM8)               -> QASYMM8                 Y          Y
-     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED          Y          Y
-     *   - (QSYMM16,QSYMM16)               -> QSYMM16, S32            N          Y
-     *
-     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
-     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
-     *
-     * @param[in] input1          First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
-     * @param[in] input2          Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
-     * @param[in] output          Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
-     * @param[in] scale           Scale to apply after multiplication.
-     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
-     *                            If both @p input1, @p input2 and @p output are of datatype S32, scale cannot be 1/255
-     * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype
-     * @param[in] rounding_policy Rounding policy.
-     * @param[in] act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
-                           const ActivationLayerInfo &act_info = ActivationLayerInfo());
-};
-
-/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
-class NEComplexPixelWiseMultiplication : public INEOperator
-{
-public:
-    /** Initialise the kernel's inputs, output.
-     *
-     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
-     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
-     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
-     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
-     */
-    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
-     *
-     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
-     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
-     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
-     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-};
-} // namespace experimental
-
-/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
+/** Basic function to run @ref cpu::CpuPixelWiseMultiplication */
 class NEPixelWiseMultiplication : public IFunction
 {
 public:
@@ -143,11 +45,11 @@
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     NEPixelWiseMultiplication(const NEPixelWiseMultiplication &) = delete;
     /** Default move constructor */
-    NEPixelWiseMultiplication(NEPixelWiseMultiplication &&);
+    NEPixelWiseMultiplication(NEPixelWiseMultiplication &&) = default;
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     NEPixelWiseMultiplication &operator=(const NEPixelWiseMultiplication &) = delete;
     /** Default move assignment operator */
-    NEPixelWiseMultiplication &operator=(NEPixelWiseMultiplication &&);
+    NEPixelWiseMultiplication &operator=(NEPixelWiseMultiplication &&) = default;
     /** Initialise the kernel's inputs, output and convertion policy.
      *
      * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
@@ -211,7 +113,7 @@
     std::unique_ptr<Impl> _impl;
 };
 
-/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
+/** Basic function to run @ref cpu::CpuComplexPixelWiseMultiplication. */
 class NEComplexPixelWiseMultiplication : public IFunction
 {
 public:
@@ -222,11 +124,11 @@
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     NEComplexPixelWiseMultiplication(const NEComplexPixelWiseMultiplication &) = delete;
     /** Default move constructor */
-    NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&);
+    NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&) = default;
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     NEComplexPixelWiseMultiplication &operator=(const NEComplexPixelWiseMultiplication &) = delete;
     /** Default move assignment operator */
-    NEComplexPixelWiseMultiplication &operator=(NEComplexPixelWiseMultiplication &&);
+    NEComplexPixelWiseMultiplication &operator=(NEComplexPixelWiseMultiplication &&) = default;
     /** Initialise the kernel's inputs, output.
      *
      * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
@@ -253,5 +155,5 @@
     struct Impl;
     std::unique_ptr<Impl> _impl;
 };
-}
+} // namespace arm_compute
 #endif /*ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H */
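
For context, the stateless operator that the function now delegates to is
configured on ITensorInfo only and receives the actual tensors through an
ITensorPack at run time. A rough sketch, assuming cpu::CpuPixelWiseMultiplication
keeps the configure() signature of the experimental class removed above (its
header is internal to the library's src/ tree, so the include path here is an
assumption):

    // Assumed sketch: stateless operator, configured on ITensorInfo only,
    // with tensors bound per run() call via an ITensorPack.
    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/Types.h"
    #include "src/runtime/cpu/operators/CpuPixelWiseMultiplication.h" // assumed internal path

    using namespace arm_compute;

    void pixelwise_mul(ITensorInfo *a_info, ITensorInfo *b_info, ITensorInfo *out_info,
                       ITensor *a, ITensor *b, ITensor *out)
    {
        cpu::CpuPixelWiseMultiplication mul;
        // No tensor pointers are captured here, only shape/type information.
        mul.configure(a_info, b_info, out_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);

        // Tensors are supplied at run time, so one configured operator can be
        // reused across different tensor instances with matching infos.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC_0, a);
        pack.add_tensor(TensorType::ACL_SRC_1, b);
        pack.add_tensor(TensorType::ACL_DST, out);
        mul.run(pack);
    }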