MLCE-159 Add QAsymmS8 to ArmnnQuantizer

 * Allow per-layer quantization from Fp32 to Int8 (QAsymmS8), as TfLite does

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I5bbf770aa29d81af3568c15b47d2b2c18e55bb28
diff --git a/src/armnnQuantizer/ArmNNQuantizerMain.cpp b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
index 30167e7..219363e 100644
--- a/src/armnnQuantizer/ArmNNQuantizerMain.cpp
+++ b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
@@ -36,9 +36,19 @@
     inputFileStream.close();
 
     armnn::QuantizerOptions quantizerOptions;
-    quantizerOptions.m_ActivationFormat = cmdline.GetQuantizationScheme() == "QSymm16"
-                                          ? armnn::DataType::QSymmS16
-                                          : armnn::DataType::QAsymmU8;
+
+    if (cmdline.GetQuantizationScheme() == "QAsymmS8")
+    {
+        quantizerOptions.m_ActivationFormat = armnn::DataType::QAsymmS8;
+    }
+    else if (cmdline.GetQuantizationScheme() == "QSymmS16")
+    {
+        quantizerOptions.m_ActivationFormat = armnn::DataType::QSymmS16;
+    }
+    else
+    {
+        quantizerOptions.m_ActivationFormat = armnn::DataType::QAsymmU8;
+    }
 
     quantizerOptions.m_PreserveType = cmdline.HasPreservedDataType();
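
For context (not part of the patch): a minimal sketch of how the scheme selected above would flow into the quantizer, assuming the INetworkQuantizer::Create / ExportNetwork interface declared in armnn/INetworkQuantizer.hpp and an already-deserialized FP32 network. The function name QuantizeToQAsymmS8 and its parameters are illustrative, not code from this change.

    #include <armnn/INetwork.hpp>
    #include <armnn/INetworkQuantizer.hpp>
    #include <armnn/Types.hpp>

    // Sketch: quantize an FP32 network using the new QAsymmS8 activation format.
    armnn::INetworkPtr QuantizeToQAsymmS8(armnn::INetwork* fp32Network, bool preserveType)
    {
        armnn::QuantizerOptions options;
        options.m_ActivationFormat = armnn::DataType::QAsymmS8; // scheme added by this change
        options.m_PreserveType     = preserveType;              // keep FP32 at the network boundary if requested

        // Create a quantizer over the FP32 network and export the quantized copy.
        armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(fp32Network, options);
        return quantizer->ExportNetwork();
    }

The if/else chain in the patch simply maps the command-line scheme string onto m_ActivationFormat, with QAsymmU8 remaining the default when no recognized scheme is given.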