COMPMID-3460: Refactor NEElementwiseUnaryKernel

Removed most of the templates and refactored the code.
Performance is the same but the library size dropped by
52KB.

Change-Id: I41ff0c0853c923d925cdaeb05f4a58c9086fff94
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3190
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
index 2a4a8f8..9a41cec 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -78,23 +78,26 @@
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
+private:
     /** Common signature for all the specialised arithmetic functions
      *
-     * @param[in]  input  An input tensor. Data types supported: F16/F32.
-     * @param[out] output The output tensor. Data types supported: F16/F32.
-     * @param[in]  window Region on which to execute the kernel.
+     * @param[in] window Region on which to execute the kernel.
      */
-    using ElementwiseUnaryFunction = void(const ITensor *input, ITensor *output, const Window &window);
+    using ElementwiseUnaryPtr = void (NEElementwiseUnaryKernel::*)(const Window &window);
 
-protected:
-    // Inherited methods overridden:
-    static Status validate_arguments(ElementWiseUnary op, const ITensorInfo &input, const ITensorInfo &output);
+    /** Template function to run elementwise unary operation
+     *
+     * @tparam ScalarType Scalar datatype
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <typename ScalarType>
+    void elementwise_op(const Window &window);
 
-    /** Function to use for the particular tensor types passed to configure() */
-    std::function<void(const ITensor *input, ITensor *output, const Window &window)> _function;
-
-    const ITensor *_input;
-    ITensor       *_output;
+    ElementwiseUnaryPtr _func;
+    const ITensor      *_input;
+    ITensor            *_output;
+    ElementWiseUnary    _op;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEELEMENTWISEUNARYKERNEL_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/exp.h b/arm_compute/core/NEON/wrapper/intrinsics/exp.h
index f079af0..4b17ebd 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/exp.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/exp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,18 @@
         return vexpq_##postfix(a);     \
     }
 
+#define VEXPQ_IMPL_INT(vtype, postfix)      \
+    inline vtype vexpq(const vtype &a)      \
+    {                                       \
+        ARM_COMPUTE_UNUSED(a);              \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
 VEXPQ_IMPL(float32x4_t, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VEXPQ_IMPL(float16x8_t, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VEXPQ_IMPL_INT(int32x4_t, s32)
 #undef VEXPQ_IMPL
 
 } // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
index 2bf9f52..77adcf7b 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,18 @@
         return prefix##_##postfix(a);                \
     }
 
+#define VINVSQRT_IMPL_INT(stype, vtype, prefix, postfix) \
+    inline vtype vinvsqrt(const vtype &a)                \
+    {                                                    \
+        ARM_COMPUTE_UNUSED(a);                           \
+        ARM_COMPUTE_ERROR("Not supported");              \
+    }
+
 VINVSQRT_IMPL(float, float32x2_t, vinvsqrt, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VINVSQRT_IMPL(float16_t, float16x4_t, vinvsqrt, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL_INT(int, int32x4_t, vinvsqrt, s32)
 
 VINVSQRT_IMPL(float, float32x4_t, vinvsqrtq, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/log.h b/arm_compute/core/NEON/wrapper/intrinsics/log.h
index bb4181e..682830c 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/log.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/log.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,19 @@
         return prefix##_##postfix(a);     \
     }
 
+#define VLOG_IMPL_INT(vtype, prefix, postfix) \
+    inline vtype vlog(const vtype &a)         \
+    {                                         \
+        ARM_COMPUTE_UNUSED(a);                \
+        ARM_COMPUTE_ERROR("Not supported");   \
+    }
+
 VLOG_IMPL(float32x4_t, vlogq, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VLOG_IMPL(float16x8_t, vlogq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOG_IMPL_INT(int32x4_t, vlogq, s32)
+
 #undef VLOG_IMPL
 } // namespace wrapper
 } // namespace arm_compute
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/round.h b/arm_compute/core/NEON/wrapper/intrinsics/round.h
index f3e0fe1..d6f5a88 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/round.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/round.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,18 @@
         return vroundq_rte_##postfix(a); \
     }
 
+#define VROUNDQ_IMPL_INT(vtype, postfix)    \
+    inline vtype vround(const vtype &a)     \
+    {                                       \
+        ARM_COMPUTE_UNUSED(a);              \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
 VROUNDQ_IMPL(float32x4_t, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VROUNDQ_IMPL(float16x8_t, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VROUNDQ_IMPL_INT(int32x4_t, s32)
 #undef VROUNDQ_IMPL
 
 } // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sin.h b/arm_compute/core/NEON/wrapper/intrinsics/sin.h
index e0fe5fb..bca72db 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/sin.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/sin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,11 +37,20 @@
         return prefix##_##postfix(a);     \
     }
 
+#define VSIN_IMPL_INT(vtype, prefix, postfix) \
+    inline vtype vsin(const vtype &a)         \
+    {                                         \
+        ARM_COMPUTE_UNUSED(a);                \
+        ARM_COMPUTE_ERROR("Not supported");   \
+    }
+
 VSIN_IMPL(float32x4_t, vsinq, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VSIN_IMPL(float16x8_t, vsinq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
+VSIN_IMPL_INT(int32x4_t, vsinq, s32)
+
 #undef vsub_IMPL
 } // namespace wrapper
 } // namespace arm_compute