GitHub #557: Fix wrong results in int8 model

 * Added support for qasymms8 (int8) to ImageTensorGenerator
 * Added qasymmu8 as an alias for qasymm8 in ImageTensorGenerator
 * Added support for qasymms8 (int8) to ExecuteNetwork (see the example
   invocation after this list)
 * Added qasymmu8 as an alias for qasymm8 in ExecuteNetwork
 * Set tflite as the default model format in ImageTensorGenerator, as it
   is the only supported model format.
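
An example invocation exercising the new data type (a sketch only: the
option spellings -f/-m/-i/-o/-d/-y are assumed here and should be
checked against the ExecuteNetwork help output):

    ExecuteNetwork -f tflite-binary -m model_int8.tflite \
                   -i input_name -o output_name \
                   -d input_tensor.raw -y qasymms8

qasymms8 selects the new signed int8 path; qasymm8 and its new alias
qasymmu8 select the existing unsigned path.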

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
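
In the diff below, the new ParseDataArray<armnn::DataType::QAsymmS8>
overload defers to armnn::Quantize for the float-to-int8 conversion.
For reference, that asymmetric affine mapping amounts to the following
minimal sketch (an illustration only, not ArmNN's implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // q = clamp(round(value / scale) + offset, -128, 127)
    int8_t QuantizeS8(float value, float scale, int32_t offset)
    {
        const int32_t rounded = static_cast<int32_t>(std::round(value / scale));
        return static_cast<int8_t>(std::clamp(rounded + offset, -128, 127));
    }

The overload mirrors the existing QAsymmU8 one but targets the int8
range [-128, 127], which is what quantized int8 models produce.
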
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 23b892f..0906c1c 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -40,6 +40,13 @@
 }
 
 template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
+{
+    return ParseArrayImpl<int8_t>(stream,
+                                  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
+template<>
 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
 {
     return ParseArrayImpl<uint8_t>(stream,
@@ -54,7 +61,20 @@
                                    [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
 }
 
-
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
+                                               const float& quantizationScale,
+                                               const int32_t& quantizationOffset)
+{
+    return ParseArrayImpl<int8_t>(stream,
+                                  [&quantizationScale, &quantizationOffset](const std::string& s)
+                                  {
+                                      return armnn::numeric_cast<int8_t>(
+                                              armnn::Quantize<int8_t>(std::stof(s),
+                                                                      quantizationScale,
+                                                                      quantizationOffset));
+                                  });
+}
 
 template<>
 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
@@ -232,12 +252,18 @@
                      ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
                      GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
     }
-    else if (dataTypeStr.compare("qasymm8") == 0)
+    else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
     {
         tensorData = readFromFile ?
                      ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
                      GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
     }
+    else if (dataTypeStr.compare("qasymms8") == 0)
+    {
+        tensorData = readFromFile ?
+                     ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
+                     GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
+    }
     else
     {
         std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;