COMPMID-3377: Async support to NEElementwiseUnaryLayerKernel kernels/functions

Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Change-Id: I208287b44ece051e95f891d43a691cb0ac6e56c5
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3419
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
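
The existing function-level classes keep their interface (configure on ITensor, blocking run()), while the new classes under the experimental namespace are configured on ITensorInfo descriptors only and executed later through the interface they inherit from INEOperator. A minimal usage sketch for NEElementwiseMax, for illustration only (the tensor shape, data type and the error-check macro are assumptions, not part of this patch):

    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void elementwise_max_example()
    {
        // Three F32 tensors of identical shape; any of the data types listed in the
        // class documentation (QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32) would work too.
        Tensor a, b, out;
        const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);
        a.allocator()->init(info);
        b.allocator()->init(info);
        out.allocator()->init(info);

        // Function interface (unchanged for callers): owns its state and runs
        // synchronously on the tensors it was configured with.
        NEElementwiseMax max_fn;
        max_fn.configure(&a, &b, &out);

        a.allocator()->allocate();
        b.allocator()->allocate();
        out.allocator()->allocate();
        // ... fill a and b ...
        max_fn.run();

        // Experimental operator interface added by this patch: validate/configure
        // take ITensorInfo only, so no tensor memory is captured at configure time.
        // The actual tensors are bound when the operator is executed through the
        // interface inherited from INEOperator (not shown in this header).
        ARM_COMPUTE_ERROR_THROW_ON(experimental::NEElementwiseMax::validate(a.info(), b.info(), out.info()));
        experimental::NEElementwiseMax max_op;
        max_op.configure(a.info(), b.info(), out.info());
    }
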
diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
index cac105c..08f798e 100644
--- a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
+++ b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
@@ -25,7 +25,9 @@
 #define ARM_COMPUTE_NEELEMENTWISEOPERATIONS_H
 
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/NEON/INEOperator.h"
+#include <memory>
 
 namespace arm_compute
 {
@@ -36,9 +37,21 @@
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a max operation between two tensors.
  */
-class NEElementwiseMax : public INESimpleFunction
+class NEElementwiseMax : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseMax();
+    /** Default Destructor */
+    ~NEElementwiseMax();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMax(const NEElementwiseMax &) = delete;
+    /** Default move constructor */
+    NEElementwiseMax(NEElementwiseMax &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMax &operator=(const NEElementwiseMax &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseMax &operator=(NEElementwiseMax &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
@@ -57,6 +70,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for min
@@ -64,9 +84,21 @@
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a min operation between two tensors.
  */
-class NEElementwiseMin : public INESimpleFunction
+class NEElementwiseMin : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseMin();
+    /** Default Destructor */
+    ~NEElementwiseMin();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMin(const NEElementwiseMin &) = delete;
+    /** Default move constructor */
+    NEElementwiseMin(NEElementwiseMin &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMin &operator=(const NEElementwiseMin &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseMin &operator=(NEElementwiseMin &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
@@ -85,6 +117,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for squared difference
@@ -92,9 +131,21 @@
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
  */
-class NEElementwiseSquaredDiff : public INESimpleFunction
+class NEElementwiseSquaredDiff : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseSquaredDiff();
+    /** Default Destructor */
+    ~NEElementwiseSquaredDiff();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseSquaredDiff(const NEElementwiseSquaredDiff &) = delete;
+    /** Default move constructor */
+    NEElementwiseSquaredDiff(NEElementwiseSquaredDiff &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseSquaredDiff &operator=(const NEElementwiseSquaredDiff &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseSquaredDiff &operator=(NEElementwiseSquaredDiff &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
@@ -113,6 +164,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for division
@@ -120,9 +178,21 @@
  * @note The tensor data type for the inputs must be F16/F32.
  * @note The function performs a division operation between two tensors (i.e., out[i] = in1[i] / in2[i])
  */
-class NEElementwiseDivision : public INESimpleFunction
+class NEElementwiseDivision : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseDivision();
+    /** Default Destructor */
+    ~NEElementwiseDivision();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseDivision(const NEElementwiseDivision &) = delete;
+    /** Default move constructor */
+    NEElementwiseDivision(NEElementwiseDivision &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseDivision &operator=(const NEElementwiseDivision &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseDivision &operator=(NEElementwiseDivision &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
@@ -141,6 +211,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for power
@@ -149,9 +226,21 @@
  * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
  * @note For an exponent that is a float, this function will only work with a positive base.
  */
-class NEElementwisePower : public INESimpleFunction
+class NEElementwisePower : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwisePower();
+    /** Default Destructor */
+    ~NEElementwisePower();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwisePower(const NEElementwisePower &) = delete;
+    /** Default move constructor */
+    NEElementwisePower(NEElementwisePower &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwisePower &operator=(const NEElementwisePower &) = delete;
+    /** Default move assignment operator */
+    NEElementwisePower &operator=(NEElementwisePower &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
@@ -170,6 +259,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEComparisonOperationKernel.
@@ -177,9 +273,21 @@
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a comparison operation between two tensors.
  */
-class NEElementwiseComparison : public INESimpleFunction
+class NEElementwiseComparison : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseComparison();
+    /** Default Destructor */
+    ~NEElementwiseComparison();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparison(const NEElementwiseComparison &) = delete;
+    /** Default move constructor */
+    NEElementwiseComparison(NEElementwiseComparison &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparison &operator=(const NEElementwiseComparison &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseComparison &operator=(NEElementwiseComparison &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
@@ -198,6 +306,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEComparisonOperationKernel
@@ -206,9 +321,21 @@
  * @note The function performs a comparison operation between two tensors.
  */
 template <ComparisonOperation op>
-class NEElementwiseComparisonStatic : public INESimpleFunction
+class NEElementwiseComparisonStatic : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseComparisonStatic();
+    /** Default Destructor */
+    ~NEElementwiseComparisonStatic();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparisonStatic(const NEElementwiseComparisonStatic &) = delete;
+    /** Default move constructor */
+    NEElementwiseComparisonStatic(NEElementwiseComparisonStatic &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparisonStatic &operator=(const NEElementwiseComparisonStatic &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseComparisonStatic &operator=(NEElementwiseComparisonStatic &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
@@ -225,6 +352,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run equal comparison. */
@@ -239,5 +373,238 @@
 using NELess = NEElementwiseComparisonStatic<ComparisonOperation::Less>;
 /** Basic function to run less-equal comparison. */
 using NELessEqual = NEElementwiseComparisonStatic<ComparisonOperation::LessEqual>;
+
+namespace experimental
+{
+/** Basic function to run @ref NEArithmeticOperationKernel for max
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a max operation between two tensors.
+ */
+class NEElementwiseMax : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for max
+     *
+     * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for min
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a min operation between two tensors.
+ */
+class NEElementwiseMin : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for min
+     *
+     * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for squared difference
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
+ */
+class NEElementwiseSquaredDiff : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for squared difference
+     *
+     * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for division
+ *
+ * @note The tensor data type for the inputs must be F16/F32.
+ * @note The function performs a division operation between two tensors (i.e., out[i] = in1[i] / in2[i])
+ */
+class NEElementwiseDivision : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for division
+     *
+     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for power
+ *
+ * @note The tensor data type for the inputs must be F16/F32.
+ * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
+ * @note For an exponent that is a float, this function will only work with a positive base.
+ */
+class NEElementwisePower : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for power
+     *
+     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEComparisonOperationKernel.
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a comparison operation between two tensors.
+ */
+class NEElementwiseComparison : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output Output tensor info. Data types supported: U16/U32.
+     * @param[in]      op     Comparison Operation to be performed.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ComparisonOperation op);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: U16/U32.
+     * @param[in] op     Comparison Operation to be performed.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op);
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEComparisonOperationKernel
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a comparison operation between two tensors.
+ */
+template <ComparisonOperation op>
+class NEElementwiseComparisonStatic : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output Output tensor info. Data types supported: U16/U32.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: U16/U32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run equal comparison. */
+using NEEqual = NEElementwiseComparisonStatic<ComparisonOperation::Equal>;
+/** Basic function to run not equal comparison. */
+using NENotEqual = NEElementwiseComparisonStatic<ComparisonOperation::NotEqual>;
+/** Basic function to run greater comparison. */
+using NEGreater = NEElementwiseComparisonStatic<ComparisonOperation::Greater>;
+/** Basic function to run greater-equal comparison. */
+using NEGreaterEqual = NEElementwiseComparisonStatic<ComparisonOperation::GreaterEqual>;
+/** Basic function to run less comparison. */
+using NELess = NEElementwiseComparisonStatic<ComparisonOperation::Less>;
+/** Basic function to run less-equal comparison. */
+using NELessEqual = NEElementwiseComparisonStatic<ComparisonOperation::LessEqual>;
+} // namespace experimental
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEELEMENTWISEOPERATIONS_H */