COMPMID-2772: Add support for QASYMM8_SIGNED in NEPoolingLayer

Change-Id: Ia8ef8f83eb8625a6a609e06dca89d674b07c59cd
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2628
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
index b36e10c..654dfad 100644
--- a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -54,7 +54,7 @@
      *
     * @note F16 is supported for pool sizes 2 and 3 only
      *
-     * @param[in]  input     Source tensor. Data types supported: QASYMM8/F16/F32.
+     * @param[in]  input     Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[out] output    Destination tensor. Data types supported: Same as @p input.
      * @param[in]  pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      */
@@ -63,7 +63,7 @@
      *
     * @note F16 is supported for pool sizes 2 and 3 only
      *
-     * @param[in] input     Source tensor. Data types supported: QASYMM8/F16/F32.
+     * @param[in] input     Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[in] output    Destination tensor. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      *
@@ -148,38 +148,42 @@
      * @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
      */
     void poolingMxN_f16_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
-    /** Function to perform 2x2 pooling for 8bit asymmetric fixed point.
+    /** Template function to perform 2x2 pooling for 8-bit quantized fixed point. (NCHW)
      *
      * @param[in] window_input    Input region on which to execute the kernel.
      * @param[in] window          Output region on which to execute the kernel.
      * @param[in] pooling_type    Pooling operation to be computed.
      * @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
      */
-    void pooling2_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
-    /** Function to perform 3x3 pooling for 8bit quantized fixed point.
+    template <typename T>
+    void pooling2_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+    /** Template function to perform 3x3 pooling for 8-bit quantized fixed point. (NCHW)
      *
      * @param[in] window_input    Input region on which to execute the kernel.
      * @param[in] window          Output region on which to execute the kernel.
      * @param[in] pooling_type    Pooling operation to be computed.
      * @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
      */
-    void pooling3_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
-    /** Function to perform MxN pooling for 8-bit quantized.
+    template <typename T>
+    void pooling3_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+    /** Template function to perform MxN pooling for 8-bit quantized. (NCHW)
      *
      * @param[in] window_input    Input region on which to execute the kernel.
      * @param[in] window          Output region on which to execute the kernel.
      * @param[in] pooling_type    Pooling operation to be computed.
      * @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
      */
-    void poolingMxN_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
-    /** Function to perform MxN pooling for 8-bit quantized. (NHWC)
+    template <typename T>
+    void poolingMxN_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+    /** Template function to perform MxN pooling for 8-bit quantized. (NHWC)
      *
      * @param[in] window_input    Input region on which to execute the kernel.
      * @param[in] window          Output region on which to execute the kernel.
      * @param[in] pooling_type    Pooling operation to be computed.
      * @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
      */
-    void poolingMxN_qasymm8_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+    template <typename T>
+    void poolingMxN_q8_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
     /** Common signature for all the specialised Pooling functions
      *
      * @param[in] window_input    Input region on which to execute the kernel.
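
The former QASYMM8-only kernels become templates over the element type, so QASYMM8 (uint8_t) and QASYMM8_SIGNED (int8_t) share one implementation. A standalone scalar sketch of the idea follows; it is illustrative only, not the kernel's vectorized code:

    #include <cstdint>

    // One body, instantiated for both 8-bit element types: comparisons
    // resolve to the right signedness through the template parameter.
    template <typename T>
    T max_pool_2x2(const T *row0, const T *row1)
    {
        T m = row0[0];
        m = (row0[1] > m) ? row0[1] : m;
        m = (row1[0] > m) ? row1[0] : m;
        m = (row1[1] > m) ? row1[1] : m;
        return m;
    }

    template uint8_t max_pool_2x2<uint8_t>(const uint8_t *, const uint8_t *); // QASYMM8
    template int8_t  max_pool_2x2<int8_t>(const int8_t *, const int8_t *);    // QASYMM8_SIGNED
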
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/ext.h b/arm_compute/core/NEON/wrapper/intrinsics/ext.h
index 70bc91a..f2c3dcc 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/ext.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/ext.h
@@ -36,6 +36,23 @@
         return prefix##_##postfix(value_a, value_b, size); \
     }
 
+VEXT_IMPL(uint8x8_t, vext, u8, 1)
+VEXT_IMPL(uint8x8_t, vext, u8, 2)
+VEXT_IMPL(int8x8_t, vext, s8, 1)
+VEXT_IMPL(int8x8_t, vext, s8, 2)
+VEXT_IMPL(uint16x4_t, vext, u16, 1)
+VEXT_IMPL(uint16x4_t, vext, u16, 2)
+VEXT_IMPL(int16x4_t, vext, s16, 1)
+VEXT_IMPL(int16x4_t, vext, s16, 2)
+
+VEXT_IMPL(uint8x16_t, vextq, u8, 1)
+VEXT_IMPL(uint8x16_t, vextq, u8, 2)
+VEXT_IMPL(int8x16_t, vextq, s8, 1)
+VEXT_IMPL(int8x16_t, vextq, s8, 2)
+VEXT_IMPL(uint16x8_t, vextq, u16, 1)
+VEXT_IMPL(uint16x8_t, vextq, u16, 2)
+VEXT_IMPL(int16x8_t, vextq, s16, 1)
+VEXT_IMPL(int16x8_t, vextq, s16, 2)
 VEXT_IMPL(int32x4_t, vextq, s32, 1)
 VEXT_IMPL(int32x4_t, vextq, s32, 2)
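
Each VEXT_IMPL line generates an inline wrapper over the matching NEON vext intrinsic, following the macro body shown above. For example, VEXT_IMPL(int8x16_t, vextq, s8, 1) should expand to roughly the following (the vext_<size> wrapper name is assumed from the macro definition earlier in the file):

    #include <arm_neon.h>

    // Lanes 1..15 of value_a followed by lane 0 of value_b.
    inline int8x16_t vext_1(int8x16_t value_a, int8x16_t value_b)
    {
        return vextq_s8(value_a, value_b, 1);
    }
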
 
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 26b4322..a7af352 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -66,5 +66,6 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/tanh.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/tbl.h"
 
 #endif /* ARM_COMPUTE_WRAPPER_INTRINSICS_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/store.h b/arm_compute/core/NEON/wrapper/intrinsics/store.h
index 0fdc46b..eb2ae6a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/store.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/store.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,7 +37,9 @@
     }
 
 VSTORE_IMPL(uint8_t, uint8x8_t, vst1, u8)
+VSTORE_IMPL(uint8_t, uint8x8x2_t, vst2, u8)
 VSTORE_IMPL(int8_t, int8x8_t, vst1, s8)
+VSTORE_IMPL(int8_t, int8x8x2_t, vst2, s8)
 VSTORE_IMPL(uint16_t, uint16x4_t, vst1, u16)
 VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
 VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
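
The new vst2 overloads let wrapper::vstore accept the x2 vector pairs and emit an interleaving store, giving the 8-bit pooling paths a way to write a pair of vectors in one call. A minimal usage sketch (the function name and values are illustrative):

    #include <arm_neon.h>
    #include "arm_compute/core/NEON/wrapper/intrinsics/store.h"

    // vstore on a uint8x8x2_t resolves to vst2_u8, which writes the two
    // vectors 2-way interleaved: out = {a0, b0, a1, b1, ...}.
    void store_interleaved(uint8_t *out) // out must hold at least 16 bytes
    {
        uint8x8x2_t pair;
        pair.val[0] = vdup_n_u8(1); // vector 'a'
        pair.val[1] = vdup_n_u8(2); // vector 'b'
        arm_compute::wrapper::vstore(out, pair); // out = 1,2,1,2,...
    }
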
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/tbl.h b/arm_compute/core/NEON/wrapper/intrinsics/tbl.h
new file mode 100644
index 0000000..d3d6b72
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/tbl.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TBL_H
+#define ARM_COMPUTE_WRAPPER_TBL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VTBL_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vtbl(const stype &a, const vtype &b) \
+    {                                                 \
+        return prefix##_##postfix(a, b);              \
+    }
+
+VTBL_IMPL(uint8x8x2_t, uint8x8_t, vtbl2, u8)
+VTBL_IMPL(int8x8x2_t, int8x8_t, vtbl2, s8)
+
+#undef VTBL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TBL_H */
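
wrapper::vtbl wraps the two-register table lookup (vtbl2_u8 / vtbl2_s8): each lane of the index vector selects one byte from the 16-byte table, and out-of-range indices yield 0. A small usage sketch (the shuffle shown is illustrative):

    #include <arm_neon.h>
    #include "arm_compute/core/NEON/wrapper/intrinsics/tbl.h"

    // Gather the even-indexed bytes of a 16-byte table into one vector.
    uint8x8_t gather_even_bytes()
    {
        uint8x8x2_t table;
        table.val[0] = vcreate_u8(0x0706050403020100ULL); // table bytes 0..7
        table.val[1] = vcreate_u8(0x0F0E0D0C0B0A0908ULL); // table bytes 8..15
        const uint8x8_t idx = vcreate_u8(0x0E0C0A0806040200ULL); // indices 0,2,4,...,14
        return arm_compute::wrapper::vtbl(table, idx); // = {0, 2, 4, ..., 14}
    }
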
diff --git a/arm_compute/core/NEON/wrapper/traits.h b/arm_compute/core/NEON/wrapper/traits.h
index 0a9015e..ae77d27 100644
--- a/arm_compute/core/NEON/wrapper/traits.h
+++ b/arm_compute/core/NEON/wrapper/traits.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -42,6 +42,7 @@
 
 /** Create the appropriate NEON vector given its type and size in terms of elements */
 template <typename T, int S> struct neon_vector;
+
 // Specializations
 #ifndef DOXYGEN_SKIP_THIS
 template <> struct neon_vector<uint8_t, 8>{ using scalar_type = uint8_t; using type = uint8x8_t; using tag_type = vector_64_tag; };
@@ -51,7 +52,9 @@
 template <> struct neon_vector<uint16_t, 4>{ using scalar_type = uint16_t; using type = uint16x4_t; using tag_type = vector_64_tag; };
 template <> struct neon_vector<int16_t, 4>{ using scalar_type = int16_t; using type = int16x4_t; using tag_type = vector_64_tag; };
 template <> struct neon_vector<uint16_t, 8>{ using scalar_type = uint16_t; using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 16>{ using scalar_type = uint16_t; using type = uint16x8x2_t; };
 template <> struct neon_vector<int16_t, 8>{ using scalar_type = int16_t; using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int16_t, 16>{ using scalar_type = int16_t; using type = int16x8x2_t; };
 template <> struct neon_vector<uint32_t, 2>{ using scalar_type = uint32_t; using type = uint32x2_t; using tag_type = vector_64_tag; };
 template <> struct neon_vector<int32_t, 2>{ using scalar_type = int32_t; using type = int32x2_t; using tag_type = vector_64_tag; };
 template <> struct neon_vector<uint32_t, 4>{ using scalar_type = uint32_t; using type = uint32x4_t; using tag_type = vector_128_tag; };
@@ -62,6 +65,7 @@
 template <> struct neon_vector<int64_t, 2>{ using scalar_type = int64_t; using type = int64x2_t; using tag_type = vector_128_tag; };
 template <> struct neon_vector<float_t, 2>{ using scalar_type = float_t; using type = float32x2_t; using tag_type = vector_64_tag; };
 template <> struct neon_vector<float_t, 4>{ using scalar_type = float_t; using type = float32x4_t; using tag_type = vector_128_tag; };
+
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 template <> struct neon_vector<float16_t, 4>{ using scalar_type = float16_t; using type = float16x4_t; using tag_type = vector_64_tag; };
 template <> struct neon_vector<float16_t, 8>{ using scalar_type = float16_t; using type = float16x8_t; using tag_type = vector_128_tag; };
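
The neon_vector trait maps a scalar type and lane count onto the corresponding NEON vector type; the new 16-lane entries resolve to the x2 register pairs used by the vst2 stores above (note they carry no tag_type, so the tag-dispatched wrapper overloads do not apply to them). A short sketch:

    #include "arm_compute/core/NEON/wrapper/traits.h"

    namespace wt = arm_compute::wrapper::traits;

    using q16_t   = wt::neon_vector<int16_t, 8>::type;  // int16x8_t, one quad register
    using q16x2_t = wt::neon_vector<int16_t, 16>::type; // int16x8x2_t, a register pair

    static_assert(sizeof(q16x2_t) == 2 * sizeof(q16_t), "x2 type is a pair of vectors");
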
diff --git a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
index ae08ce6..eb840b5 100644
--- a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -48,7 +48,7 @@
      *
      * @note F16 is supported for pool sizes 2 and 3 only
      *
-     * @param[in, out] input     Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
+     * @param[in, out] input     Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[out]     output    Destination tensor. Data types supported: Same as @p input.
      * @param[in]      pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      */
@@ -57,7 +57,7 @@
      *
      * @note F16 is supported for pool sizes 2 and 3 only
      *
-     * @param[in] input     Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
+     * @param[in] input     Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[in] output    Destination tensor. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      *
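
With these changes, NEPoolingLayer can be configured directly on QASYMM8_SIGNED tensors. A minimal usage sketch, assuming the PoolingLayerInfo overload taking a pooling type, pool size and PadStrideInfo; the shape and quantization parameters are illustrative:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void pool_signed_quantized()
    {
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED,
                                         QuantizationInfo(0.5f, 10)));

        NEPoolingLayer pool;
        // configure() auto-initializes the empty output tensor's info.
        pool.configure(&src, &dst, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src with quantized values, then:
        pool.run();
    }
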