IVGCVSW-3512 Update NNAPISupport.txt for 19.08

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ie222e046f2fe832ad48d4b2279c8815f860f76d5
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 6f74097..3310f0e 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -10,7 +10,7 @@
 
 --- Support for Android Neural Networks HAL operations ---
 
-The following AndroidNN HAL 1.0 and 1.1 operations are currently supported:
+The following AndroidNN HAL 1.0, 1.1 and 1.2 operations are currently supported:
 
 AndroidNN operator           Tensor type supported
 ADD                          (FLOAT32,QUANT8_ASYMM)
@@ -28,15 +28,22 @@
 LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
 LOGISTIC                     (FLOAT32,QUANT8_ASYMM)
 LSTM                         (FLOAT32)
+MAXIMUM                      (FLOAT32,QUANT8_ASYMM)
 MAX_POOL_2D                  (FLOAT32,QUANT8_ASYMM)
 MEAN                         (FLOAT32,QUANT8_ASYMM)
+MINIMUM                      (FLOAT32,QUANT8_ASYMM)
 MUL                          (FLOAT32,QUANT8_ASYMM)
 PAD                          (FLOAT32,QUANT8_ASYMM)
+PAD_V2                       (FLOAT32,QUANT8_ASYMM)
+PRELU                        (FLOAT32,QUANT8_ASYMM)
+QUANTIZE                     (FLOAT32,QUANT8_ASYMM)
+QUANTIZED_16BIT_LSTM         (QUANT8_ASYMM)
 RELU                         (FLOAT32,QUANT8_ASYMM)
 RELU1                        (FLOAT32,QUANT8_ASYMM)
 RELU6                        (FLOAT32,QUANT8_ASYMM)
 RESHAPE                      (FLOAT32,QUANT8_ASYMM)
 RESIZE_BILINEAR              (FLOAT32,QUANT8_ASYMM)
+RESIZE_NEAREST_NEIGHBOR      (FLOAT32,QUANT8_ASYMM)
 SOFTMAX                      (FLOAT32,QUANT8_ASYMM)
 SPACE_TO_BATCH_ND            (FLOAT32,QUANT8_ASYMM)
 SPACE_TO_DEPTH               (FLOAT32,QUANT8_ASYMM)
@@ -45,19 +52,6 @@
 SUB                          (FLOAT32,QUANT8_ASYMM)
 TANH                         (FLOAT32,QUANT8_ASYMM)
 TRANSPOSE                    (FLOAT32,QUANT8_ASYMM)
-
-The following AndroidNN HAL 1.2 operations are currently supported:
-
-CONV_2D                      (FLOAT32,QUANT8_ASYMM)
-DEPTHWISE_CONV_2D            (FLOAT32,QUANT8_ASYMM)
-MAXIMUM                      (FLOAT32,QUANT8_ASYMM)
-MINIMUM                      (FLOAT32,QUANT8_ASYMM)
-PAD_V2                       (FLOAT32,QUANT8_ASYMM)
-PRELU                        (FLOAT32,QUANT8_ASYMM)
-QUANTIZE                     (FLOAT32,QUANT8_ASYMM)
-QUANTIZED_16BIT_LSTM         (QUANT8_ASYMM)
-RESIZE_NEAREST_NEIGHBOR      (FLOAT32,QUANT8_ASYMM)
-SOFTMAX                      (FLOAT32,QUANT8_ASYMM)
 TRANSPOSE_CONV_2D            (FLOAT32,QUANT8_ASYMM)
 
 --- Unsupported operators ---
@@ -74,7 +68,6 @@
 The following AndroidNN HAL 1.2 operations are currently not supported:
 
 CONCATENATION
-LSTM
 
 Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework
 appropriately and the framework implements those operations using a CPU implementation.
diff --git a/README.md b/README.md
index 99c7a25..10adc17 100644
--- a/README.md
+++ b/README.md
@@ -9,10 +9,9 @@
 ### Prerequisites
 
 1. Android source tree for Android P FSK-R3 or later, in the directory `<ANDROID_ROOT>`
+1. Android source tree for Android Q FSK-2 or later, in the directory `<ANDROID_ROOT>`
 2. Mali OpenCL driver integrated into the Android source tree
 
-Please Note: ArmNN Neural Networks driver does not currently support Mali OpenCL driver for Android Q.
-
 ### Procedure
 
 1. Place this source directory at `<ANDROID_ROOT>/vendor/arm/android-nn-driver`
@@ -34,6 +33,12 @@
 PRODUCT_PACKAGES += android.hardware.neuralnetworks@1.1-service-armnn
 </pre> `Android.mk` contains the module definition of both versions of the ArmNN driver.
 
+For Android Q, a new version of the NN API is available (1.2),
+thus the following should be added to `device.mk` instead:
+<pre>
+PRODUCT_PACKAGES += android.hardware.neuralnetworks@1.2-service-armnn
+</pre> `Android.mk` contains the module definition of all versions of the ArmNN driver.
+
 Similarly, the Neon or CL backend can be enabled/disabled by setting ARMNN_COMPUTE_CL_ENABLE or
 ARMNN_COMPUTE_NEON_ENABLE in `device.mk`:
 <pre>
@@ -41,6 +46,7 @@
 </pre>
 
 For Android P and Android Q the vendor manifest.xml requires the Neural Network HAL information.
+For Android P, use HAL version 1.1 as shown below. For Android Q, substitute 1.2 where necessary.
 ```xml
 <hal format="hidl">
     <name>android.hardware.neuralnetworks</name>