COMPMID-3373: Async support to NEArithmetic* kernels/functions (Pt. 2)
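
NEPixelWiseMultiplication and NEComplexPixelWiseMultiplication are ported
to the operator model: the experimental:: classes are configured with
ITensorInfo objects and report their workspace requirements, while the
public classes keep the IFunction interface and forward to the operators
through a pimpl (struct Impl). NELSTMLayer, NENormalizationLayer and
NEQLSTMLayer now use the NEPixelWiseMultiplication function instead of
the kernel.

A minimal usage sketch of the public function interface, for illustration
only (the example() wrapper, tensor shapes and data types are placeholders
and not part of this patch):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void example()
    {
        Tensor a{}, b{}, out{};
        a.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        out.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        // The function keeps the IFunction interface; internally it now
        // configures an experimental::NEPixelWiseMultiplication operator from
        // the tensors' TensorInfo and runs it on the tensors passed here.
        NEPixelWiseMultiplication mul;
        mul.configure(&a, &b, &out, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);

        a.allocator()->allocate();
        b.allocator()->allocate();
        out.allocator()->allocate();

        // ... fill a and b, then execute the multiplication ...
        mul.run();
    }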

Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Change-Id: Iec06adb535aaf7efb1838d921e8d6bb978b7b215
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3498
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NELSTMLayer.h b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
index b9b581c..2e2de61 100644
--- a/arm_compute/runtime/NEON/functions/NELSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
@@ -26,7 +26,6 @@
 
 #include "arm_compute/core/NEON/kernels/NEActivationLayerKernel.h"
 #include "arm_compute/core/NEON/kernels/NECopyKernel.h"
-#include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
@@ -36,6 +35,7 @@
 #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMM.h"
 #include "arm_compute/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
 #include "arm_compute/runtime/common/LSTMParams.h"
 
 namespace arm_compute
@@ -146,89 +146,89 @@
     void prepare() override;
 
 private:
-    MemoryGroup                     _memory_group;
-    NEFullyConnectedLayer           _fully_connected_input_gate;
-    NEArithmeticAddition            _accum_input_gate1;
-    NEArithmeticSubtraction         _subtract_input_gate;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_input_gate;
-    NEActivationLayer               _activation_input_gate;
-    NEFullyConnectedLayer           _fully_connected_forget_gate;
-    NEArithmeticAddition            _accum_forget_gate1;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate;
-    NEActivationLayer               _activation_forget_gate;
-    NEFullyConnectedLayer           _fully_connected_cell_state;
-    NEGEMM                          _gemm_cell_state1;
-    NETransposeKernel               _transpose_cell_state;
-    NEArithmeticAddition            _accum_cell_state1;
-    NEArithmeticAddition            _accum_cell_state2;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_cell_state1;
-    NEActivationLayer               _activation_cell_state;
-    NEActivationLayer               _cell_clip;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_cell_state2;
-    NEFullyConnectedLayer           _fully_connected_output;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_output_state1;
-    NEArithmeticAddition            _accum_output1;
-    NEActivationLayer               _activation_output;
-    NEActivationLayer               _activation_output_state;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_output_state2;
-    NEFullyConnectedLayer           _fully_connected_output_state;
-    NEActivationLayer               _projection_clip;
-    NECopyKernel                    _copy_cell_state;
-    NECopyKernel                    _copy_output;
-    NEConcatenateLayer              _concat_scratch_buffer;
-    NEConcatenateLayer              _concat_inputs_forget_gate;
-    NEConcatenateLayer              _concat_weights_forget_gate;
-    NEConcatenateLayer              _concat_weights_input_gate;
-    NEConcatenateLayer              _concat_weights_output;
-    NEMeanStdDevNormalizationLayer  _mean_std_norm_input_gate;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_input_gate_coeff;
-    NEArithmeticAddition            _accum_input_gate_bias;
-    NEMeanStdDevNormalizationLayer  _mean_std_norm_forget_gate;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate_coeff;
-    NEArithmeticAddition            _accum_forget_gate_bias;
-    NEMeanStdDevNormalizationLayer  _mean_std_norm_cell_gate;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_cell_gate_coeff;
-    NEArithmeticAddition            _accum_cell_gate_bias;
-    NEMeanStdDevNormalizationLayer  _mean_std_norm_output_gate;
-    NEPixelWiseMultiplicationKernel _pixelwise_mul_output_gate_coeff;
-    NEArithmeticAddition            _accum_output_gate_bias;
-    Tensor                          _input_gate_out1;
-    Tensor                          _input_gate_out2;
-    Tensor                          _input_gate_out3;
-    Tensor                          _input_gate_out4;
-    Tensor                          _forget_gate_out1;
-    Tensor                          _forget_gate_out2;
-    Tensor                          _forget_gate_out3;
-    Tensor                          _forget_gate_out4;
-    Tensor                          _forget_gate_out5;
-    Tensor                          _forget_gate_out6;
-    Tensor                          _cell_state_out1;
-    Tensor                          _cell_state_out2;
-    Tensor                          _cell_state_out3;
-    Tensor                          _cell_state_out4;
-    Tensor                          _cell_state_out5;
-    Tensor                          _output1;
-    Tensor                          _output2;
-    Tensor                          _output3;
-    Tensor                          _output4;
-    Tensor                          _cell_state_activation;
-    Tensor                          _output_state1;
-    Tensor                          _ones;
-    Tensor                          _input_layer_norm_out1;
-    Tensor                          _input_layer_norm_out2;
-    Tensor                          _forget_layer_norm_out1;
-    Tensor                          _forget_layer_norm_out2;
-    Tensor                          _cell_layer_norm_out1;
-    Tensor                          _cell_layer_norm_out2;
-    Tensor                          _output_layer_norm_out1;
-    Tensor                          _output_layer_norm_out2;
-    bool                            _run_peephole_opt;
-    bool                            _run_cifg_opt;
-    bool                            _perform_cell_clipping;
-    bool                            _has_projection_weights;
-    bool                            _perform_projection_clipping;
-    bool                            _is_prepared;
-    bool                            _is_layer_norm_lstm;
+    MemoryGroup                    _memory_group;
+    NEFullyConnectedLayer          _fully_connected_input_gate;
+    NEArithmeticAddition           _accum_input_gate1;
+    NEArithmeticSubtraction        _subtract_input_gate;
+    NEPixelWiseMultiplication      _pixelwise_mul_input_gate;
+    NEActivationLayer              _activation_input_gate;
+    NEFullyConnectedLayer          _fully_connected_forget_gate;
+    NEArithmeticAddition           _accum_forget_gate1;
+    NEPixelWiseMultiplication      _pixelwise_mul_forget_gate;
+    NEActivationLayer              _activation_forget_gate;
+    NEFullyConnectedLayer          _fully_connected_cell_state;
+    NEGEMM                         _gemm_cell_state1;
+    NETransposeKernel              _transpose_cell_state;
+    NEArithmeticAddition           _accum_cell_state1;
+    NEArithmeticAddition           _accum_cell_state2;
+    NEPixelWiseMultiplication      _pixelwise_mul_cell_state1;
+    NEActivationLayer              _activation_cell_state;
+    NEActivationLayer              _cell_clip;
+    NEPixelWiseMultiplication      _pixelwise_mul_cell_state2;
+    NEFullyConnectedLayer          _fully_connected_output;
+    NEPixelWiseMultiplication      _pixelwise_mul_output_state1;
+    NEArithmeticAddition           _accum_output1;
+    NEActivationLayer              _activation_output;
+    NEActivationLayer              _activation_output_state;
+    NEPixelWiseMultiplication      _pixelwise_mul_output_state2;
+    NEFullyConnectedLayer          _fully_connected_output_state;
+    NEActivationLayer              _projection_clip;
+    NECopyKernel                   _copy_cell_state;
+    NECopyKernel                   _copy_output;
+    NEConcatenateLayer             _concat_scratch_buffer;
+    NEConcatenateLayer             _concat_inputs_forget_gate;
+    NEConcatenateLayer             _concat_weights_forget_gate;
+    NEConcatenateLayer             _concat_weights_input_gate;
+    NEConcatenateLayer             _concat_weights_output;
+    NEMeanStdDevNormalizationLayer _mean_std_norm_input_gate;
+    NEPixelWiseMultiplication      _pixelwise_mul_input_gate_coeff;
+    NEArithmeticAddition           _accum_input_gate_bias;
+    NEMeanStdDevNormalizationLayer _mean_std_norm_forget_gate;
+    NEPixelWiseMultiplication      _pixelwise_mul_forget_gate_coeff;
+    NEArithmeticAddition           _accum_forget_gate_bias;
+    NEMeanStdDevNormalizationLayer _mean_std_norm_cell_gate;
+    NEPixelWiseMultiplication      _pixelwise_mul_cell_gate_coeff;
+    NEArithmeticAddition           _accum_cell_gate_bias;
+    NEMeanStdDevNormalizationLayer _mean_std_norm_output_gate;
+    NEPixelWiseMultiplication      _pixelwise_mul_output_gate_coeff;
+    NEArithmeticAddition           _accum_output_gate_bias;
+    Tensor                         _input_gate_out1;
+    Tensor                         _input_gate_out2;
+    Tensor                         _input_gate_out3;
+    Tensor                         _input_gate_out4;
+    Tensor                         _forget_gate_out1;
+    Tensor                         _forget_gate_out2;
+    Tensor                         _forget_gate_out3;
+    Tensor                         _forget_gate_out4;
+    Tensor                         _forget_gate_out5;
+    Tensor                         _forget_gate_out6;
+    Tensor                         _cell_state_out1;
+    Tensor                         _cell_state_out2;
+    Tensor                         _cell_state_out3;
+    Tensor                         _cell_state_out4;
+    Tensor                         _cell_state_out5;
+    Tensor                         _output1;
+    Tensor                         _output2;
+    Tensor                         _output3;
+    Tensor                         _output4;
+    Tensor                         _cell_state_activation;
+    Tensor                         _output_state1;
+    Tensor                         _ones;
+    Tensor                         _input_layer_norm_out1;
+    Tensor                         _input_layer_norm_out2;
+    Tensor                         _forget_layer_norm_out1;
+    Tensor                         _forget_layer_norm_out2;
+    Tensor                         _cell_layer_norm_out1;
+    Tensor                         _cell_layer_norm_out2;
+    Tensor                         _output_layer_norm_out1;
+    Tensor                         _output_layer_norm_out2;
+    bool                           _run_peephole_opt;
+    bool                           _run_cifg_opt;
+    bool                           _perform_cell_clipping;
+    bool                           _has_projection_weights;
+    bool                           _perform_projection_clipping;
+    bool                           _is_prepared;
+    bool                           _is_layer_norm_lstm;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NELSTMLAYER_H */
diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 8683e44..bead014 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -28,10 +28,10 @@
 
 #include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
 #include "arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h"
-#include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
 #include "arm_compute/runtime/Tensor.h"
 
 #include <memory>
@@ -42,7 +42,7 @@
 
 /** Basic function to compute a normalization layer. This function calls the following NEON kernels:
  *
- * -# @ref NEPixelWiseMultiplicationKernel
+ * -# @ref NEPixelWiseMultiplication
  * -# @ref NEFillBorderKernel
  * -# @ref NENormalizationLayerKernel
  *
@@ -75,10 +75,10 @@
     void run() override;
 
 private:
-    MemoryGroup                     _memory_group;    /**< Function memory group */
-    NENormalizationLayerKernel      _norm_kernel;     /**< Normalization layer kernel */
-    NEPixelWiseMultiplicationKernel _multiply_kernel; /**< Pixel multiplication kernel */
-    Tensor                          _input_squared;   /**< The intermediate buffer which stores results of squaring input */
+    MemoryGroup                _memory_group;  /**< Function memory group */
+    NENormalizationLayerKernel _norm_kernel;   /**< Normalization layer kernel */
+    NEPixelWiseMultiplication  _multiply_f;    /**< Pixel multiplication function */
+    Tensor                     _input_squared; /**< The intermediate buffer which stores results of squaring input */
 };
 }
 #endif /* ARM_COMPUTE_NENORMALIZATIONLAYER_H */
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index d84dff2..3b12093 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -25,15 +25,17 @@
 #define ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H
 
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunction.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/NEON/INEOperator.h"
 
 namespace arm_compute
 {
 class ITensor;
 
+namespace experimental
+{
 /** Basic function to run @ref NEPixelWiseMultiplicationKernel */
-class NEPixelWiseMultiplication : public INESimpleFunctionNoBorder
+class NEPixelWiseMultiplication : public INEOperator
 {
 public:
     /** Initialise the kernel's inputs, output and convertion policy.
@@ -60,7 +62,7 @@
      * @param[in]      rounding_policy Rounding policy.
      * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
-    void configure(ITensor *input1, ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
+    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                    const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
      *
@@ -88,10 +90,13 @@
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                            const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
 };
 
 /** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
-class NEComplexPixelWiseMultiplication : public INESimpleFunction
+class NEComplexPixelWiseMultiplication : public INEOperator
 {
 public:
     /** Initialise the kernel's inputs, output.
@@ -103,6 +108,123 @@
      * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
+    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
+     *
+     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+} // namespace experimental
+
+/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
+class NEPixelWiseMultiplication : public IFunction
+{
+public:
+    /** Default Constructor */
+    NEPixelWiseMultiplication();
+    /** Default Destructor */
+    ~NEPixelWiseMultiplication();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPixelWiseMultiplication(const NEPixelWiseMultiplication &) = delete;
+    /** Default move constructor */
+    NEPixelWiseMultiplication(NEPixelWiseMultiplication &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPixelWiseMultiplication &operator=(const NEPixelWiseMultiplication &) = delete;
+    /** Default move assignment operator */
+    NEPixelWiseMultiplication &operator=(NEPixelWiseMultiplication &&);
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
+     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
+     *
+     * @param[in, out] input1          An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
+     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2          An input tensor. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, QSYMM16 (only if @p input1 is QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
+     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output          Output tensor. Data types supported:
+     *                                 - U8, only if both inputs are U8.
+     *                                 - QASYMM8, only if both inputs are QASYMM8.
+     *                                 - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
+     *                                 - S16.
+     *                                 - QSYMM16, only if both inputs are QSYMM16.
+     *                                 - S32, only if both inputs are QSYMM16.
+     *                                 - F16, only if @p input1 is F16.
+     *                                 - F32, only if both inputs are F32.
+     * @param[in]      scale           Scale to apply after multiplication.
+     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
+     * @param[in]      overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if datatype is QASYMM8, QASYMM8_SIGNED or QSYMM16.
+     * @param[in]      rounding_policy Rounding policy.
+     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensor *input1, const ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
+                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
+     *
+     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
+     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
+     *
+     * @param[in] input1          An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
+     * @param[in] input2          An input tensor info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, QSYMM16 (only if both inputs are QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
+     * @param[in] output          Output tensor info. Data types supported:
+     *                            - U8, only if both inputs are U8.
+     *                            - QASYMM8, only if both inputs are QASYMM8.
+     *                            - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
+     *                            - S16.
+     *                            - QSYMM16, only if both inputs are QSYMM16.
+     *                            - S32, only if both inputs are QSYMM16.
+     *                            - F16, only if @p input1 is F16.
+     *                            - F32, only if both inputs are F32.
+     * @param[in] scale           Scale to apply after multiplication.
+     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
+     * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if datatype is QASYMM8, QASYMM8_SIGNED or QSYMM16.
+     * @param[in] rounding_policy Rounding policy.
+     * @param[in] act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
+                           const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
+};
+
+/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
+class NEComplexPixelWiseMultiplication : public IFunction
+{
+public:
+    /** Default Constructor */
+    NEComplexPixelWiseMultiplication();
+    /** Default Destructor */
+    ~NEComplexPixelWiseMultiplication();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEComplexPixelWiseMultiplication(const NEComplexPixelWiseMultiplication &) = delete;
+    /** Default move constructor */
+    NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEComplexPixelWiseMultiplication &operator=(const NEComplexPixelWiseMultiplication &) = delete;
+    /** Default move assignment operator */
+    NEComplexPixelWiseMultiplication &operator=(NEComplexPixelWiseMultiplication &&);
+    /** Initialise the kernel's inputs, output.
+     *
+     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
      *
@@ -112,6 +234,13 @@
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 }
 #endif /*ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H */
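
Note on the header above (illustrative sketch, not part of the patch): the
experimental operator is configured and validated against ITensorInfo only;
the actual tensors are bound later, when the operator is run through the
INEOperator execution interface. The snippet below assumes placeholder
TensorInfo objects and a configure_operator_example() wrapper:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"

    using namespace arm_compute;

    void configure_operator_example()
    {
        // Only tensor metadata is needed at configure time.
        TensorInfo src0(TensorShape(16U, 16U), 1, DataType::F32);
        TensorInfo src1(TensorShape(16U, 16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(16U, 16U), 1, DataType::F32);

        if(bool(experimental::NEPixelWiseMultiplication::validate(
               &src0, &src1, &dst, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO)))
        {
            experimental::NEPixelWiseMultiplication op;
            op.configure(&src0, &src1, &dst, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
            // Tensors are supplied at run time by the caller of the operator.
        }
    }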
diff --git a/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h b/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
index 60c8fa1..a19310d 100644
--- a/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
@@ -26,7 +26,6 @@
 
 #include "arm_compute/core/NEON/kernels/NECopyKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
 #include "arm_compute/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
@@ -34,6 +33,7 @@
 #include "arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
+#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
 #include "arm_compute/runtime/NEON/functions/NETranspose.h"
 
 #include "arm_compute/runtime/common/LSTMParams.h"
@@ -54,7 +54,7 @@
  * -# @ref NEGEMMLowpMatrixMultiplyCore                          Quantized matrix multiplication core. Accumulators are 32-bit integers
  * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint   Convert 32-bit integers into QSYMM16
  * -# @ref NEGEMMLowpMatrixAReductionKernel                      For precomputing effective biases to use
- * -# @ref NEPixelWiseMultiplicationKernel                       Elementwise multiplication
+ * -# @ref NEPixelWiseMultiplication                             Elementwise multiplication
  * -# @ref NETranspose                                           Transpose function for reshaping the weights
  * */
 class NEQLSTMLayer : public IFunction
@@ -257,7 +257,7 @@
     NEArithmeticAddition             _projection_bias_add{};
     NEGEMMLowpMatrixMultiplyCore     _mm_input_to_forget{};
     NEGEMMLowpMatrixMultiplyCore     _mm_recurrent_to_forget{};
-    NEPixelWiseMultiplicationKernel  _pixelwise_mul_cell_to_forget{};
+    NEPixelWiseMultiplication        _pixelwise_mul_cell_to_forget{};
     NEGEMMLowpOutputStage            _input_to_forget_outstage{};
     NEGEMMLowpOutputStage            _recurrent_to_forget_outstage{};
     NEGEMMLowpOutputStage            _cell_to_forget_outstage{};
@@ -276,12 +276,12 @@
     NEGEMMLowpMatrixMultiplyCore     _mm_recurrent_to_input{};
     NEGEMMLowpOutputStage            _recurrent_to_input_outstage{};
     NEArithmeticAddition             _accumulate_input_recurrent_input{};
-    NEPixelWiseMultiplicationKernel  _pixelwise_mul_cell_to_input{};
+    NEPixelWiseMultiplication        _pixelwise_mul_cell_to_input{};
     NEGEMMLowpOutputStage            _cell_to_input_outstage{};
     NEArithmeticAddition             _accumulate_cell_input{};
     NEActivationLayer                _input_gate_sigmoid{};
-    NEPixelWiseMultiplicationKernel  _pixelwise_mul_forget_cell{};
-    NEPixelWiseMultiplicationKernel  _pixelwise_mul_input_cell{};
+    NEPixelWiseMultiplication        _pixelwise_mul_forget_cell{};
+    NEPixelWiseMultiplication        _pixelwise_mul_input_cell{};
     NEArithmeticAddition             _add_forget_cell{};
     NEActivationLayer                _cell_clip{};
     NEGEMMLowpMatrixMultiplyCore     _mm_input_to_output{};
@@ -289,12 +289,12 @@
     NEGEMMLowpMatrixMultiplyCore     _mm_recurrent_to_output{};
     NEGEMMLowpOutputStage            _recurrent_to_output_outstage{};
     NEArithmeticAddition             _accumulate_input_recurrent_output{};
-    NEPixelWiseMultiplicationKernel  _pixelwise_mul_cell_to_output{};
+    NEPixelWiseMultiplication        _pixelwise_mul_cell_to_output{};
     NEGEMMLowpOutputStage            _cell_to_output_outstage{};
     NEArithmeticAddition             _accumulate_cell_to_output{};
     NEActivationLayer                _output_gate_sigmoid{};
     NEActivationLayer                _hidden_tanh{};
-    NEPixelWiseMultiplicationKernel  _pixelwise_mul_hidden{};
+    NEPixelWiseMultiplication        _pixelwise_mul_hidden{};
     NEGEMMLowpOutputStage            _hidden_outstage{};
     NEGEMMLowpMatrixMultiplyCore     _mm_projection{};
     NEGEMMLowpOutputStage            _projection_outstage{};