COMPMID-3385: Async support to CLArithmetic* kernels/functions Pt.2

Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Change-Id: Idc5ac2dd2ba5295c00c88b44a783645327a27e15
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3617
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
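
This moves CLPixelWiseMultiplication and CLComplexPixelWiseMultiplication to
the experimental operator interface: the operator is configured once from
ITensorInfo descriptors, and the concrete ICLTensors are only bound at run()
time through tensor maps, which is what enables stateless, async-friendly
execution. A minimal usage sketch of the new operator (not part of this
patch; the ACL_SRC_0/ACL_SRC_1/ACL_DST map keys and the use of the
CLKernelLibrary compile context are assumptions based on the experimental
API of this period):

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"

    using namespace arm_compute;

    void run_experimental_mul()
    {
        CLScheduler::get().default_init();

        CLTensor src0, src1, dst;
        const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);
        src0.allocator()->init(info);
        src1.allocator()->init(info);
        dst.allocator()->init(info);

        // Configure from tensor metadata only; no ICLTensor is captured here.
        experimental::CLPixelWiseMultiplication mul;
        mul.configure(CLKernelLibrary::get().get_compile_context(),
                      src0.info(), src1.info(), dst.info(),
                      1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);

        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();

        // Bind the concrete tensors at run time (key names are assumed).
        InputTensorMap  inputs  = { { TensorType::ACL_SRC_0, &src0 }, { TensorType::ACL_SRC_1, &src1 } };
        OutputTensorMap outputs = { { TensorType::ACL_DST, &dst } };
        mul.run(inputs, outputs, {});
    }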
diff --git a/arm_compute/runtime/CL/functions/CLLSTMLayer.h b/arm_compute/runtime/CL/functions/CLLSTMLayer.h
index abfcc3a..1a8b334 100644
--- a/arm_compute/runtime/CL/functions/CLLSTMLayer.h
+++ b/arm_compute/runtime/CL/functions/CLLSTMLayer.h
@@ -28,7 +28,6 @@
 
 #include "arm_compute/core/CL/kernels/CLCopyKernel.h"
 #include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
-#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
@@ -37,6 +36,7 @@
 #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
 #include "arm_compute/runtime/CL/functions/CLGEMM.h"
 #include "arm_compute/runtime/CL/functions/CLMeanStdDevNormalizationLayer.h"
+#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/MemoryGroup.h"
 #include "arm_compute/runtime/common/LSTMParams.h"
@@ -97,7 +97,7 @@
                    const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
                    const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
                    const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
-                   const ICLTensor *output_state_in, const ICLTensor *cell_state_in,
+                   const ICLTensor *output_state_in, ICLTensor *cell_state_in,
                    ICLTensor *scratch_buffer, ICLTensor *output_state_out, ICLTensor *cell_state_out, ICLTensor *output,
                    const LSTMParams<ICLTensor> &lstm_params, const ActivationLayerInfo &activation_info, float cell_threshold = 0.f, float projection_threshold = 0.f);
     /** Initialize function's tensors.
@@ -143,7 +143,7 @@
                    const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
                    const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
                    const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
-                   const ICLTensor *output_state_in, const ICLTensor *cell_state_in,
+                   const ICLTensor *output_state_in, ICLTensor *cell_state_in,
                    ICLTensor *scratch_buffer, ICLTensor *output_state_out, ICLTensor *cell_state_out, ICLTensor *output,
                    const LSTMParams<ICLTensor> &lstm_params, const ActivationLayerInfo &activation_info, float cell_threshold = 0.f, float projection_threshold = 0.f);
 
@@ -200,90 +200,90 @@
     void prepare() override;
 
 private:
-    MemoryGroup                     _memory_group;
-    CLFullyConnectedLayer           _fully_connected_input_gate;
-    CLArithmeticAddition            _accum_input_gate1;
-    CLArithmeticSubtraction         _subtract_input_gate;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate;
-    CLActivationLayer               _activation_input_gate;
-    CLFullyConnectedLayer           _fully_connected_forget_gate;
-    CLArithmeticAddition            _accum_forget_gate1;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate;
-    CLActivationLayer               _activation_forget_gate;
-    CLFullyConnectedLayer           _fully_connected_cell_state;
-    CLGEMM                          _gemm_cell_state1;
-    CLTransposeKernel               _transpose_cell_state;
-    CLArithmeticAddition            _accum_cell_state1;
-    CLArithmeticAddition            _accum_cell_state2;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state1;
-    CLActivationLayer               _activation_cell_state;
-    CLActivationLayer               _cell_clip;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state2;
-    CLFullyConnectedLayer           _fully_connected_output;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state1;
-    CLArithmeticAddition            _accum_output1;
-    CLActivationLayer               _activation_output;
-    CLActivationLayer               _activation_output_state;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state2;
-    CLFullyConnectedLayer           _fully_connected_output_state;
-    CLActivationLayer               _projection_clip;
-    CLCopyKernel                    _copy_cell_state;
-    CLCopyKernel                    _copy_output;
-    CLConcatenateLayer              _concat_scratch_buffer;
-    CLConcatenateLayer              _concat_inputs_forget_gate;
-    CLConcatenateLayer              _concat_weights_forget_gate;
-    CLConcatenateLayer              _concat_weights_input_gate;
-    CLConcatenateLayer              _concat_weights_output;
-    CLMemsetKernel                  _ones_memset_kernel;
-    CLMeanStdDevNormalizationLayer  _mean_std_norm_input_gate;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate_coeff;
-    CLArithmeticAddition            _accum_input_gate_bias;
-    CLMeanStdDevNormalizationLayer  _mean_std_norm_forget_gate;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate_coeff;
-    CLArithmeticAddition            _accum_forget_gate_bias;
-    CLMeanStdDevNormalizationLayer  _mean_std_norm_cell_gate;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_gate_coeff;
-    CLArithmeticAddition            _accum_cell_gate_bias;
-    CLMeanStdDevNormalizationLayer  _mean_std_norm_output_gate;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_output_gate_coeff;
-    CLArithmeticAddition            _accum_output_gate_bias;
-    CLTensor                        _input_gate_out1;
-    CLTensor                        _input_gate_out2;
-    CLTensor                        _input_gate_out3;
-    CLTensor                        _input_gate_out4;
-    CLTensor                        _forget_gate_out1;
-    CLTensor                        _forget_gate_out2;
-    CLTensor                        _forget_gate_out3;
-    CLTensor                        _forget_gate_out4;
-    CLTensor                        _forget_gate_out5;
-    CLTensor                        _forget_gate_out6;
-    CLTensor                        _cell_state_out1;
-    CLTensor                        _cell_state_out2;
-    CLTensor                        _cell_state_out3;
-    CLTensor                        _cell_state_out4;
-    CLTensor                        _cell_state_out5;
-    CLTensor                        _output1;
-    CLTensor                        _output2;
-    CLTensor                        _output3;
-    CLTensor                        _output4;
-    CLTensor                        _cell_state_activation;
-    CLTensor                        _output_state1;
-    CLTensor                        _ones;
-    CLTensor                        _input_layer_norm_out1;
-    CLTensor                        _input_layer_norm_out2;
-    CLTensor                        _forget_layer_norm_out1;
-    CLTensor                        _forget_layer_norm_out2;
-    CLTensor                        _cell_layer_norm_out1;
-    CLTensor                        _cell_layer_norm_out2;
-    CLTensor                        _output_layer_norm_out1;
-    CLTensor                        _output_layer_norm_out2;
-    bool                            _run_peephole_opt;
-    bool                            _run_cifg_opt;
-    bool                            _perform_cell_clipping;
-    bool                            _has_projection_weights;
-    bool                            _perform_projection_clipping;
-    bool                            _is_prepared;
-    bool                            _is_layer_norm_lstm;
+    MemoryGroup                    _memory_group;
+    CLFullyConnectedLayer          _fully_connected_input_gate;
+    CLArithmeticAddition           _accum_input_gate1;
+    CLArithmeticSubtraction        _subtract_input_gate;
+    CLPixelWiseMultiplication      _pixelwise_mul_input_gate;
+    CLActivationLayer              _activation_input_gate;
+    CLFullyConnectedLayer          _fully_connected_forget_gate;
+    CLArithmeticAddition           _accum_forget_gate1;
+    CLPixelWiseMultiplication      _pixelwise_mul_forget_gate;
+    CLActivationLayer              _activation_forget_gate;
+    CLFullyConnectedLayer          _fully_connected_cell_state;
+    CLGEMM                         _gemm_cell_state1;
+    CLTransposeKernel              _transpose_cell_state;
+    CLArithmeticAddition           _accum_cell_state1;
+    CLArithmeticAddition           _accum_cell_state2;
+    CLPixelWiseMultiplication      _pixelwise_mul_cell_state1;
+    CLActivationLayer              _activation_cell_state;
+    CLActivationLayer              _cell_clip;
+    CLPixelWiseMultiplication      _pixelwise_mul_cell_state2;
+    CLFullyConnectedLayer          _fully_connected_output;
+    CLPixelWiseMultiplication      _pixelwise_mul_output_state1;
+    CLArithmeticAddition           _accum_output1;
+    CLActivationLayer              _activation_output;
+    CLActivationLayer              _activation_output_state;
+    CLPixelWiseMultiplication      _pixelwise_mul_output_state2;
+    CLFullyConnectedLayer          _fully_connected_output_state;
+    CLActivationLayer              _projection_clip;
+    CLCopyKernel                   _copy_cell_state;
+    CLCopyKernel                   _copy_output;
+    CLConcatenateLayer             _concat_scratch_buffer;
+    CLConcatenateLayer             _concat_inputs_forget_gate;
+    CLConcatenateLayer             _concat_weights_forget_gate;
+    CLConcatenateLayer             _concat_weights_input_gate;
+    CLConcatenateLayer             _concat_weights_output;
+    CLMemsetKernel                 _ones_memset_kernel;
+    CLMeanStdDevNormalizationLayer _mean_std_norm_input_gate;
+    CLPixelWiseMultiplication      _pixelwise_mul_input_gate_coeff;
+    CLArithmeticAddition           _accum_input_gate_bias;
+    CLMeanStdDevNormalizationLayer _mean_std_norm_forget_gate;
+    CLPixelWiseMultiplication      _pixelwise_mul_forget_gate_coeff;
+    CLArithmeticAddition           _accum_forget_gate_bias;
+    CLMeanStdDevNormalizationLayer _mean_std_norm_cell_gate;
+    CLPixelWiseMultiplication      _pixelwise_mul_cell_gate_coeff;
+    CLArithmeticAddition           _accum_cell_gate_bias;
+    CLMeanStdDevNormalizationLayer _mean_std_norm_output_gate;
+    CLPixelWiseMultiplication      _pixelwise_mul_output_gate_coeff;
+    CLArithmeticAddition           _accum_output_gate_bias;
+    CLTensor                       _input_gate_out1;
+    CLTensor                       _input_gate_out2;
+    CLTensor                       _input_gate_out3;
+    CLTensor                       _input_gate_out4;
+    CLTensor                       _forget_gate_out1;
+    CLTensor                       _forget_gate_out2;
+    CLTensor                       _forget_gate_out3;
+    CLTensor                       _forget_gate_out4;
+    CLTensor                       _forget_gate_out5;
+    CLTensor                       _forget_gate_out6;
+    CLTensor                       _cell_state_out1;
+    CLTensor                       _cell_state_out2;
+    CLTensor                       _cell_state_out3;
+    CLTensor                       _cell_state_out4;
+    CLTensor                       _cell_state_out5;
+    CLTensor                       _output1;
+    CLTensor                       _output2;
+    CLTensor                       _output3;
+    CLTensor                       _output4;
+    CLTensor                       _cell_state_activation;
+    CLTensor                       _output_state1;
+    CLTensor                       _ones;
+    CLTensor                       _input_layer_norm_out1;
+    CLTensor                       _input_layer_norm_out2;
+    CLTensor                       _forget_layer_norm_out1;
+    CLTensor                       _forget_layer_norm_out2;
+    CLTensor                       _cell_layer_norm_out1;
+    CLTensor                       _cell_layer_norm_out2;
+    CLTensor                       _output_layer_norm_out1;
+    CLTensor                       _output_layer_norm_out2;
+    bool                           _run_peephole_opt;
+    bool                           _run_cifg_opt;
+    bool                           _perform_cell_clipping;
+    bool                           _has_projection_weights;
+    bool                           _perform_projection_clipping;
+    bool                           _is_prepared;
+    bool                           _is_layer_norm_lstm;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CLLSTMLAYER_H */
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index b87daba..ca8d77e 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -24,18 +24,142 @@
 #ifndef ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H
 #define ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H
 
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
+#include "arm_compute/runtime/CL/ICLOperator.h"
+#include "arm_compute/runtime/IFunction.h"
 
 namespace arm_compute
 {
 // Forward declaration
 class ICLTensor;
 
+namespace experimental
+{
 /** Basic function to run @ref CLPixelWiseMultiplicationKernel. */
-class CLPixelWiseMultiplication : public ICLSimpleFunction
+class CLPixelWiseMultiplication : public ICLOperator
 {
 public:
+    /** Default Constructor */
+    CLPixelWiseMultiplication();
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * Valid configurations (Input1,Input2) -> Output :
+     *
+     *   - (U8,U8)                         -> U8
+     *   - (U8,U8)                         -> S16
+     *   - (U8,S16)                        -> S16
+     *   - (S16,U8)                        -> S16
+     *   - (S16,S16)                       -> S16
+     *   - (F16,F16)                       -> F16
+     *   - (F32,F32)                       -> F32
+     *   - (QASYMM8,QASYMM8)               -> QASYMM8
+     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+     *   - (QSYMM16,QSYMM16)               -> QSYMM16
+     *   - (QSYMM16,QSYMM16)               -> S32
+     *
+     * @param[in]      compile_context The compile context to be used.
+     * @param[in, out] input1          An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2          An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output          The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+     * @param[in]      scale           Scale to apply after multiplication.
+     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
+     * @param[in]      overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+     * @param[in]      rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
+     */
+    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
+                   ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
+     *
+     * Valid configurations (Input1,Input2) -> Output :
+     *
+     *   - (U8,U8)                         -> U8
+     *   - (U8,U8)                         -> S16
+     *   - (U8,S16)                        -> S16
+     *   - (S16,U8)                        -> S16
+     *   - (S16,S16)                       -> S16
+     *   - (F16,F16)                       -> F16
+     *   - (F32,F32)                       -> F32
+     *   - (QASYMM8,QASYMM8)               -> QASYMM8
+     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+     *   - (QSYMM16,QSYMM16)               -> QSYMM16
+     *   - (QSYMM16,QSYMM16)               -> S32
+     *
+     * @param[in] input1          An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+     * @param[in] input2          An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+     * @param[in] output          The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+     * @param[in] scale           Scale to apply after multiplication.
+     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
+     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+     * @param[in] act_info        (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+                           ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+
+private:
+    CLFillBorderKernel _border_handler;
+};
+
+/** Basic function to run @ref CLComplexPixelWiseMultiplicationKernel. */
+class CLComplexPixelWiseMultiplication : public ICLOperator
+{
+public:
+    /** Default Constructor */
+    CLComplexPixelWiseMultiplication();
+    /** Initialise the kernel's inputs and output.
+     *
+     * @param[in]      compile_context The compile context to be used.
+     * @param[in, out] input1          An input tensor. Data types supported: F32. Number of channels supported: 2.
+     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2          An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output          The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
+     */
+    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplication
+     *
+     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2.
+     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+
+private:
+    CLFillBorderKernel _border_handler;
+};
+} // namespace experimental
+
+/** Basic function to run @ref CLPixelWiseMultiplicationKernel. */
+class CLPixelWiseMultiplication : public IFunction
+{
+public:
+    /** Default Constructor */
+    CLPixelWiseMultiplication();
+    /** Default Destructor */
+    ~CLPixelWiseMultiplication();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLPixelWiseMultiplication(const CLPixelWiseMultiplication &) = delete;
+    /** Default move constructor */
+    CLPixelWiseMultiplication(CLPixelWiseMultiplication &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLPixelWiseMultiplication &operator=(const CLPixelWiseMultiplication &) = delete;
+    /** Default move assignment operator */
+    CLPixelWiseMultiplication &operator=(CLPixelWiseMultiplication &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * Valid configurations (Input1,Input2) -> Output :
@@ -125,12 +249,31 @@
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                            ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref CLComplexPixelWiseMultiplicationKernel. */
-class CLComplexPixelWiseMultiplication : public ICLSimpleFunction
+class CLComplexPixelWiseMultiplication : public IFunction
 {
 public:
+    /** Default Constructor */
+    CLComplexPixelWiseMultiplication();
+    /** Default Destructor */
+    ~CLComplexPixelWiseMultiplication();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLComplexPixelWiseMultiplication(const CLComplexPixelWiseMultiplication &) = delete;
+    /** Default move constructor */
+    CLComplexPixelWiseMultiplication(CLComplexPixelWiseMultiplication &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLComplexPixelWiseMultiplication &operator=(const CLComplexPixelWiseMultiplication &) = delete;
+    /** Default move assignment operator */
+    CLComplexPixelWiseMultiplication &operator=(CLComplexPixelWiseMultiplication &&);
     /** Initialise the kernel's inputs, output.
      *
      * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2.
@@ -160,6 +303,13 @@
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H */
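
For existing users, the function-level classes above keep the eager
interface, now backed by a pImpl that wraps the experimental operator; this
is how CLLSTMLayer and CLQLSTMLayer below continue to consume it. A short
sketch, assuming the same tensor setup as in the earlier example and the
pre-existing three-tensor configure() overload retained by this patch:

    // Legacy-style facade: state lives behind _impl, tensors are captured
    // at configure() time, and run() takes no arguments.
    CLPixelWiseMultiplication mul;
    mul.configure(&src0, &src1, &dst,
                  1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
    mul.run(); // dispatches the wrapped operator with the captured tensors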
diff --git a/arm_compute/runtime/CL/functions/CLQLSTMLayer.h b/arm_compute/runtime/CL/functions/CLQLSTMLayer.h
index 0aea91a..53f337b 100644
--- a/arm_compute/runtime/CL/functions/CLQLSTMLayer.h
+++ b/arm_compute/runtime/CL/functions/CLQLSTMLayer.h
@@ -26,13 +26,13 @@
 
 #include "arm_compute/core/CL/kernels/CLCopyKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h"
-#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
 #include "arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
 #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
 #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
+#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"
 #include "arm_compute/runtime/CL/functions/CLTranspose.h"
 
 #include "arm_compute/runtime/common/LSTMParams.h"
@@ -52,7 +52,7 @@
  * -# @ref CLGEMMLowpMatrixMultiplyCore                          Quantized matrix multiplication core. Accumulators are 32-bit integers
  * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint   Convert 32-bit integers into QSYMM16
  * -# @ref CLGEMMLowpMatrixAReductionKernel                      For precomputing effective biases to use
- * -# @ref CLPixelWiseMultiplicationKernel                       Elementwise multiplication
+ * -# @ref CLPixelWiseMultiplication                             Elementwise multiplication
  * -# @ref CLTranspose                                           Transpose function for reshaping the weights
  * */
 class CLQLSTMLayer : public IFunction
@@ -113,7 +113,7 @@
                    const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
                    const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
                    const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
-                   const ICLTensor *cell_state_in, const ICLTensor *output_state_in,
+                   ICLTensor *cell_state_in, const ICLTensor *output_state_in,
                    ICLTensor *cell_state_out, ICLTensor *output_state_out, ICLTensor *output,
                    const LSTMParams<ICLTensor> &lstm_params);
 
@@ -163,7 +163,7 @@
                    const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
                    const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
                    const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
-                   const ICLTensor *cell_state_in, const ICLTensor *output_state_in,
+                   ICLTensor *cell_state_in, const ICLTensor *output_state_in,
                    ICLTensor *cell_state_out, ICLTensor *output_state_out, ICLTensor *output,
                    const LSTMParams<ICLTensor> &lstm_params);
 
@@ -306,7 +306,7 @@
     CLArithmeticAddition             _projection_bias_add{};
     CLGEMMLowpMatrixMultiplyCore     _mm_input_to_forget{};
     CLGEMMLowpMatrixMultiplyCore     _mm_recurrent_to_forget{};
-    CLPixelWiseMultiplicationKernel  _pixelwise_mul_cell_to_forget{};
+    CLPixelWiseMultiplication        _pixelwise_mul_cell_to_forget{};
     CLGEMMLowpOutputStage            _input_to_forget_outstage{};
     CLGEMMLowpOutputStage            _recurrent_to_forget_outstage{};
     CLGEMMLowpOutputStage            _cell_to_forget_outstage{};
@@ -325,12 +325,12 @@
     CLGEMMLowpMatrixMultiplyCore     _mm_recurrent_to_input{};
     CLGEMMLowpOutputStage            _recurrent_to_input_outstage{};
     CLArithmeticAddition             _accumulate_input_recurrent_input{};
-    CLPixelWiseMultiplicationKernel  _pixelwise_mul_cell_to_input{};
+    CLPixelWiseMultiplication        _pixelwise_mul_cell_to_input{};
     CLGEMMLowpOutputStage            _cell_to_input_outstage{};
     CLArithmeticAddition             _accumulate_cell_input{};
     CLActivationLayer                _input_gate_sigmoid{};
-    CLPixelWiseMultiplicationKernel  _pixelwise_mul_forget_cell{};
-    CLPixelWiseMultiplicationKernel  _pixelwise_mul_input_cell{};
+    CLPixelWiseMultiplication        _pixelwise_mul_forget_cell{};
+    CLPixelWiseMultiplication        _pixelwise_mul_input_cell{};
     CLArithmeticAddition             _add_forget_cell{};
     CLActivationLayer                _cell_clip{};
     CLGEMMLowpMatrixMultiplyCore     _mm_input_to_output{};
@@ -338,12 +338,12 @@
     CLGEMMLowpMatrixMultiplyCore     _mm_recurrent_to_output{};
     CLGEMMLowpOutputStage            _recurrent_to_output_outstage{};
     CLArithmeticAddition             _accumulate_input_recurrent_output{};
-    CLPixelWiseMultiplicationKernel  _pixelwise_mul_cell_to_output{};
+    CLPixelWiseMultiplication        _pixelwise_mul_cell_to_output{};
     CLGEMMLowpOutputStage            _cell_to_output_outstage{};
     CLArithmeticAddition             _accumulate_cell_to_output{};
     CLActivationLayer                _output_gate_sigmoid{};
     CLActivationLayer                _hidden_tanh{};
-    CLPixelWiseMultiplicationKernel  _pixelwise_mul_hidden{};
+    CLPixelWiseMultiplication        _pixelwise_mul_hidden{};
     CLGEMMLowpOutputStage            _hidden_outstage{};
     CLGEMMLowpMatrixMultiplyCore     _mm_projection{};
     CLGEMMLowpOutputStage            _projection_outstage{};