IVGCVSW-3093 Update NnapiSupport.txt for 19.05

Change-Id: I51b7a40214945ba89ff2fc4f44d86f47d2e9b13e
Signed-off-by: nikraj01 <nikhil.raj@arm.com>
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index c8f77e5..3641251 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -12,35 +12,35 @@
 The following AndroidNN operations are currently supported.
 
 AndroidNN operator           Tensor type supported
-ADD                          (FLOAT32,QUANT8_ASYMM)
+ADD                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 AVERAGE_POOL_2D              (FLOAT32,QUANT8_ASYMM)
 BATCH_TO_SPACE_ND            (FLOAT32,QUANT8_ASYMM)
-CONCATENATION                (FLOAT32)
+CONCATENATION                (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 CONV_2D                      (FLOAT32,QUANT8_ASYMM)
 DEPTHWISE_CONV_2D*           (FLOAT32,QUANT8_ASYMM)
-DIV                          (FLOAT32,QUANT8_ASYMM)
+DIV                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 FLOOR                        (FLOAT32)
 FULLY_CONNECTED              (FLOAT32,QUANT8_ASYMM)
 L2_NORMALIZATION             (FLOAT32)
-L2_POOL_2D                   (FLOAT32)
+L2_POOL_2D                   (FLOAT32,QUANT8_ASYMM)
 LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
-LOGISTIC                     (FLOAT32,QUANT8_ASYMM)
+LOGISTIC                     (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 LSTM                         (FLOAT32)
 MAX_POOL_2D                  (FLOAT32,QUANT8_ASYMM)
 MEAN                         (FLOAT32,QUANT8_ASYMM)
-MUL                          (FLOAT32,QUANT8_ASYMM)
+MUL                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 PAD                          (FLOAT32,QUANT8_ASYMM)
-RELU                         (FLOAT32,QUANT8_ASYMM)
-RELU1                        (FLOAT32,QUANT8_ASYMM)
-RELU6                        (FLOAT32,QUANT8_ASYMM)
+RELU                         (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+RELU1                        (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+RELU6                        (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 RESHAPE                      (FLOAT32,QUANT8_ASYMM)
-RESIZE_BILINEAR              (FLOAT32)
+RESIZE_BILINEAR              (FLOAT32,QUANT8_ASYMM)
 SOFTMAX                      (FLOAT32,QUANT8_ASYMM)
 SPACE_TO_BATCH_ND            (FLOAT32,QUANT8_ASYMM)
 SQUEEZE                      (FLOAT32,QUANT8_ASYMM)
 STRIDED_SLICE                (FLOAT32,QUANT8_ASYMM)
-SUB                          (FLOAT32,QUANT8_ASYMM)
-TANH                         (FLOAT32)
+SUB                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+TANH                         (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 TRANSPOSE                    (FLOAT32,QUANT8_ASYMM)
 
 * Depthwise convolution only supports a value of 1 for the depth multiplier. In addition, the QUANT8_ASYMM version only supports 3x3 kernels.