Optimize CPU mul layer on quantized data

Resolves: [COMPMID-5461]

Signed-off-by: Omar Al Khatib <omar.alkhatib@arm.com>
Change-Id: I89b99d267c32b00ef44f9bb6e7c714dfe4a0d29d
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8420
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
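
For context, a quantized elementwise multiply typically subtracts each input's zero point, multiplies in a wider integer type, rescales by the combined scale, and narrows back to 8 bits with saturation; the wrapper intrinsics added below (vsubl, vshr_n/vshrq_n) and the existing vqrshrn cover the vectorised forms of those steps. A minimal scalar reference for one QASYMM8 product, with illustrative parameter names not taken from this patch:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Scalar sketch of one QASYMM8 multiply: illustrative only, not the kernel code in this patch.
    uint8_t mul_qasymm8(uint8_t a, uint8_t b,
                        int32_t a_offset, float a_scale,
                        int32_t b_offset, float b_scale,
                        int32_t o_offset, float o_scale)
    {
        const int32_t a_s32 = static_cast<int32_t>(a) - a_offset; // zero-point subtraction
        const int32_t b_s32 = static_cast<int32_t>(b) - b_offset;
        const float   acc   = static_cast<float>(a_s32 * b_s32) * (a_scale * b_scale / o_scale);
        const int32_t q     = static_cast<int32_t>(std::lround(acc)) + o_offset; // requantize
        return static_cast<uint8_t>(std::clamp(q, 0, 255));                      // saturate to uint8
    }
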
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
index d740091..e41e9b8 100644
--- a/src/core/NEON/wrapper/intrinsics/shr.h
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -25,21 +25,19 @@
 #ifndef ARM_COMPUTE_WRAPPER_SHR_H
 #define ARM_COMPUTE_WRAPPER_SHR_H
 
-#include <type_traits>
 #include <arm_neon.h>
+#include <type_traits>
 
 namespace arm_compute
 {
 namespace wrapper
 {
-
 #define VQRSHRN_IMPL(half_vtype, vtype, prefix, postfix) \
     template <int b>                                     \
     inline half_vtype vqrshrn(const vtype &a)            \
     {                                                    \
         return prefix##_##postfix(a, b);                 \
     }
-
 VQRSHRN_IMPL(int8x8_t, int16x8_t, vqrshrn_n, s16)
 VQRSHRN_IMPL(uint8x8_t, uint16x8_t, vqrshrn_n, u16)
 VQRSHRN_IMPL(int16x4_t, int32x4_t, vqrshrn_n, s32)
@@ -77,20 +75,38 @@
     {                                                                                                            \
         return prefix_signed##_##postfix(a, b);                                                                  \
     }                                                                                                            \
-                                                                                                                 \
+    \
     template <int b, typename T>                                                                                 \
     inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
     vqrshrn_ex(const vtype &a)                                                                                   \
     {                                                                                                            \
         return prefix_unsigned##_##postfix(a, b);                                                                \
     }
-
 VQRSHRN_EX_IMPL(int8x8_t, int16x8_t, vqrshrn_n, vqrshrun_n, s16)
 VQRSHRN_EX_IMPL(int16x4_t, int32x4_t, vqrshrn_n, vqrshrun_n, s32)
 VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
-
 #undef VQRSHRN_EX_IMPL
 
+#define VSHR_IMPL(vtype, prefix, postfix) \
+    template <int b>                      \
+    inline vtype vshr_n(const vtype &a)   \
+    {                                     \
+        return prefix##_##postfix(a, b);  \
+    }
+VSHR_IMPL(uint8x8_t, vshr_n, u8)
+VSHR_IMPL(int8x8_t, vshr_n, s8)
+#undef VSHR_IMPL
+
+#define VSHRQ_IMPL(vtype, prefix, postfix) \
+    template <int b>                       \
+    inline vtype vshrq_n(const vtype &a)   \
+    {                                      \
+        return prefix##_##postfix(a, b);   \
+    }
+VSHRQ_IMPL(uint32x4_t, vshrq_n, u32)
+VSHRQ_IMPL(int32x4_t, vshrq_n, s32)
+#undef VSHRQ_IMPL
+
 #ifdef __aarch64__
 #define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix)                       \
     template <int b, typename T>                                                                                 \
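
The VSHR_IMPL/VSHRQ_IMPL blocks above wrap the immediate right-shift intrinsics behind a uniform template, so kernels can shift by a compile-time amount without spelling out the per-type intrinsic. A minimal usage sketch, assuming the header above is on the include path:

    #include <arm_neon.h>

    #include "src/core/NEON/wrapper/intrinsics/shr.h"

    // Arithmetic right shift of a signed 32-bit vector by 1: expands to vshrq_n_s32(acc, 1).
    int32x4_t halve(int32x4_t acc)
    {
        return arm_compute::wrapper::vshrq_n<1>(acc);
    }

    // Logical right shift of unsigned bytes by 4: expands to vshr_n_u8(packed, 4).
    uint8x8_t high_nibbles(uint8x8_t packed)
    {
        return arm_compute::wrapper::vshr_n<4>(packed);
    }
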
diff --git a/src/core/NEON/wrapper/intrinsics/store.h b/src/core/NEON/wrapper/intrinsics/store.h
index 6dda432..ce1b9a5 100644
--- a/src/core/NEON/wrapper/intrinsics/store.h
+++ b/src/core/NEON/wrapper/intrinsics/store.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -44,8 +44,6 @@
 VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
 VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
 VSTORE_IMPL(int32_t, int32x2_t, vst1, s32)
-//VSTORE_IMPL(uint64_t, 1, vst1, u64)
-//VSTORE_IMPL(int64_t, 1, vst1, s64)
 VSTORE_IMPL(float, float32x2_t, vst1, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VSTORE_IMPL(float16_t, float16x4_t, vst1, f16)
@@ -57,8 +55,6 @@
 VSTORE_IMPL(int16_t, int16x8_t, vst1q, s16)
 VSTORE_IMPL(uint32_t, uint32x4_t, vst1q, u32)
 VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
-//VSTORE_IMPL(uint64_t, 2, vst1q, u64)
-//VSTORE_IMPL(int64_t, 2, vst1q, s64)
 VSTORE_IMPL(float, float32x4_t, vst1q, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VSTORE_IMPL(float16_t, float16x8_t, vst1q, f16)
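
The VSTORE_IMPL table above dispatches on the register type, so a kernel's store call stays type-agnostic. A small sketch, assuming the header is on the include path and dst points at writable memory:

    #include <arm_neon.h>

    #include "src/core/NEON/wrapper/intrinsics/store.h"

    // Resolves to vst1q_s32(dst, v) via VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32).
    void store_s32(int32_t *dst, int32x4_t v)
    {
        arm_compute::wrapper::vstore(dst, v);
    }
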
diff --git a/src/core/NEON/wrapper/intrinsics/sub.h b/src/core/NEON/wrapper/intrinsics/sub.h
index 475986d..2043671 100644
--- a/src/core/NEON/wrapper/intrinsics/sub.h
+++ b/src/core/NEON/wrapper/intrinsics/sub.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -98,6 +98,21 @@
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 #undef VQSUB_IMPL
 
+#define VSUBL_IMPL(rtype, vtype, prefix, postfix)      \
+    inline rtype vsubl(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VSUBL_IMPL(int16x8_t, int8x8_t, vsubl, s8)
+VSUBL_IMPL(int32x4_t, int16x4_t, vsubl, s16)
+VSUBL_IMPL(int64x2_t, int32x2_t, vsubl, s32)
+VSUBL_IMPL(uint16x8_t, uint8x8_t, vsubl, u8)
+VSUBL_IMPL(uint32x4_t, uint16x4_t, vsubl, u16)
+VSUBL_IMPL(uint64x2_t, uint32x2_t, vsubl, u32)
+
+#undef VSUBL_IMPL
+
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_WRAPPER_SUB_H */
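
The new vsubl wrapper exposes the widening subtract, which is a natural fit for zero-point subtraction on 8-bit quantized inputs: both operands fit in 8 bits, so the modulo-2^16 unsigned difference reinterpreted as int16 is the exact signed difference. A minimal sketch with an illustrative offset parameter, not the kernel code from this patch:

    #include <arm_neon.h>

    #include "src/core/NEON/wrapper/intrinsics/sub.h"

    // Widening zero-point subtraction on QASYMM8 lanes: (a - a_offset) as int16x8_t.
    int16x8_t subtract_offset(uint8x8_t a, uint8_t a_offset)
    {
        const uint8x8_t voffset = vdup_n_u8(a_offset);
        // wrapper::vsubl resolves to vsubl_u8 here, returning the widened uint16x8_t difference.
        const uint16x8_t wide = arm_compute::wrapper::vsubl(a, voffset);
        // Reinterpret as signed: correct because both inputs are < 256, so the wrapped
        // unsigned result and the true signed difference share the same 16-bit pattern.
        return vreinterpretq_s16_u16(wide);
    }
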