COMPMID-1826: Add support for QASYMM8 in NEArithmeticAdditionKernel

Change-Id: Ia7fb128e1f3944d0d831e1d125a6db3e1d257106
Reviewed-on: https://review.mlplatform.org/355
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-by: Anthony Barbier <Anthony.barbier@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
index 8cf21ea..73beca6 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
@@ -56,25 +56,26 @@
      *
      * Valid configurations (Input1,Input2) -> Output :
      *
-     *   - (U8,U8)     -> U8
-     *   - (U8,U8)     -> S16
-     *   - (S16,U8)    -> S16
-     *   - (U8,S16)    -> S16
-     *   - (S16,S16)   -> S16
-     *   - (F16,F16)   -> F16
-     *   - (F32,F32)   -> F32
+     *   - (U8,U8)           -> U8
+     *   - (U8,U8)           -> S16
+     *   - (S16,U8)          -> S16
+     *   - (U8,S16)          -> S16
+     *   - (S16,S16)         -> S16
+     *   - (F16,F16)         -> F16
+     *   - (F32,F32)         -> F32
+     *   - (QASYMM8,QASYMM8) -> QASYMM8
      *
-     * @param[in]  input1 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in]  input2 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32.
+     * @param[in]  input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in]  input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
      * @param[in]  policy Overflow policy.
      */
     void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAdditionKernel
      *
-     * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in] output The output tensor. Data types supported: U8/S16/F16/F32.
+     * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
      * @param[in] policy Overflow policy.
      *
      * @return a status
@@ -88,9 +89,9 @@
 private:
     /** Common signature for all the specialised add functions
      *
-     * @param[in]  input1 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in]  input2 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32.
+     * @param[in]  input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in]  input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
      * @param[in]  window Region on which to execute the kernel.
      */
     using AddFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
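For reference, a minimal sketch of how the new (QASYMM8,QASYMM8) -> QASYMM8 configuration can be
checked against the kernel's validate() entry point shown above. The shape, scale and offset below
are hypothetical values chosen only for illustration, not taken from the library's tests:

    #include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    void check_qasymm8_addition()
    {
        // Hypothetical shape and quantization parameters, purely illustrative.
        const TensorShape      shape(32U, 32U);
        const QuantizationInfo qinfo(0.5f, 128);

        const TensorInfo input1(shape, 1, DataType::QASYMM8, qinfo);
        const TensorInfo input2(shape, 1, DataType::QASYMM8, qinfo);
        const TensorInfo output(shape, 1, DataType::QASYMM8, qinfo);

        // validate() reports whether (QASYMM8,QASYMM8) -> QASYMM8 is an accepted
        // configuration without configuring the kernel or allocating memory.
        const Status status = NEArithmeticAdditionKernel::validate(&input1, &input2, &output, ConvertPolicy::SATURATE);
        ARM_COMPUTE_UNUSED(status);
    }
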
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index c296463..e35f2fa 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -37,22 +37,22 @@
 public:
     /** Initialise the kernel's inputs, output and conversion policy.
      *
-     * @param[in]  input1 First tensor input. Data types supported: U8/S16/F16/F32
-     * @param[in]  input2 Second tensor input. Data types supported: U8/S16/F16/F32
-     * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
+     * @param[in]  input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in]  input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAddition
      *
-     * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
-     * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
-     * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
+     * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
      * @param[in] policy Policy to use to handle overflow.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEARITHMETICADDITION_H__ */
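
A hedged usage sketch of the runtime function with the newly supported QASYMM8 inputs. The tensor
shape and quantization parameters are hypothetical placeholders; input data filling is omitted:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Hypothetical shape and quantization parameters, for illustration only.
        const TensorShape      shape(16U, 16U);
        const QuantizationInfo qinfo(0.25f, 10);

        Tensor input1, input2, output;
        input1.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));
        input2.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));
        output.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));

        // Configure the function before allocating the backing memory.
        NEArithmeticAddition add;
        add.configure(&input1, &input2, &output, ConvertPolicy::SATURATE);

        input1.allocator()->allocate();
        input2.allocator()->allocate();
        output.allocator()->allocate();

        // Fill input1 and input2 with quantized data here (omitted), then run.
        add.run();

        return 0;
    }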