GitHub #557: Fix wrong result in int8 model

 * Added support for qasymms8 (int8) to ImageTensorGenerator
 * Added qasymmu8 as an alias for qasymm8 in ImageTensorGenerator
 * Added support for qasymms8 (int8) to ExecuteNetwork (a quantization
   sketch illustrating the int8/uint8 distinction follows the trailers
   below)
 * Added qasymmu8 as an alias for qasymm8 in ExecuteNetwork
 * Set tflite as the default model format in ImageTensorGenerator,
   as it is the only supported model format.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
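
For context on the bug itself: qasymms8 (asymmetric, signed int8) and
qasymm8/qasymmu8 (asymmetric, unsigned uint8) both use the affine scheme
real = scale * (q - zeroPoint), but over the ranges [-128, 127] and
[0, 255] respectively. Reading an int8 tensor through the uint8-only
path reinterprets every negative code as a large positive one, which is
the kind of wrong result reported in GitHub #557. A minimal standalone
sketch of the failure mode (plain C++17, no ArmNN dependency; names and
values here are illustrative, not taken from the tools):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Standard affine (asymmetric) quantization: real = scale * (q - zeroPoint).
    // QuantT is int8_t for qasymms8 and uint8_t for qasymm8/qasymmu8.
    template <typename QuantT>
    QuantT Quantize(float value, float scale, int zeroPoint)
    {
        int q  = static_cast<int>(std::lround(value / scale)) + zeroPoint;
        int lo = std::numeric_limits<QuantT>::min();
        int hi = std::numeric_limits<QuantT>::max();
        return static_cast<QuantT>(std::clamp(q, lo, hi));
    }

    template <typename QuantT>
    float Dequantize(QuantT q, float scale, int zeroPoint)
    {
        return scale * static_cast<float>(static_cast<int>(q) - zeroPoint);
    }

    int main()
    {
        const float scale = 0.5f;
        int8_t s8 = Quantize<int8_t>(-10.0f, scale, 0); // qasymms8 code -20
        // Reading the int8 buffer through a uint8_t* (the old
        // qasymm8-only path) reinterprets -20 as 236, which then
        // dequantizes to +118.0 instead of -10.0.
        uint8_t misread = static_cast<uint8_t>(s8);
        std::printf("correct: %.1f  misread: %.1f\n",
                    Dequantize(s8, scale, 0),
                    Dequantize(misread, scale, 0));
        return 0;
    }

The new qasymms8 branches below avoid this by populating and reading
the tensors through typed_tensor<int8_t> end to end.
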
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index bce8358..a9b5a3c 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -155,7 +155,8 @@
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
-        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
+                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
 
@@ -175,6 +176,26 @@
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
+        else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
+            std::vector<int8_t> tensorData;
+            PopulateTensorWithDataGeneric<int8_t>(tensorData,
+                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                                  dataFile,
+                                                  [](const std::string& s)
+                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+
+            std::copy(tensorData.begin(), tensorData.end(), inputData);
+        }
         else
         {
             ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
@@ -245,7 +266,8 @@
                     printf("%d ", tfLiteDelageOutputData[i]);
                 }
             }
-            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
+            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 ||
+                     params.m_OutputTypes[outputIndex].compare("qasymmu8") == 0)
             {
                 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                 if(tfLiteDelageOutputData == NULL)
@@ -374,13 +396,17 @@
                 if (params.m_OutputTypes[i].compare("float") == 0)
                 {
                     outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
-                } else if (params.m_OutputTypes[i].compare("int") == 0)
+                }
+                else if (params.m_OutputTypes[i].compare("int") == 0)
                 {
                     outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
-                } else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
+                }
+                else if (params.m_OutputTypes[i].compare("qasymm8") == 0 ||
+                         params.m_OutputTypes[i].compare("qasymmu8") == 0)
                 {
                     outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
-                } else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
+                }
+                else if (params.m_OutputTypes[i].compare("qasymms8") == 0)
                 {
                     outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
                 } else
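
The new qasymms8 input branch parses whitespace-separated integers with
std::stoi and narrows them through armnn::numeric_cast<int8_t>, which
performs a checked narrowing rather than silently wrapping. A rough
standalone equivalent of that parsing step (ParseInt8Values is a
hypothetical name; the real tool routes this through ArmNN's
PopulateTensorWithDataGeneric<int8_t> with the lambda shown in the diff,
and the sketch substitutes an explicit range check for numeric_cast):

    #include <cstddef>
    #include <cstdint>
    #include <istream>
    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Parse text into int8 tensor data, rejecting values outside
    // [-128, 127] instead of wrapping them.
    std::vector<int8_t> ParseInt8Values(std::istream& stream, size_t numElements)
    {
        std::vector<int8_t> data;
        data.reserve(numElements);
        std::string token;
        while (data.size() < numElements && stream >> token)
        {
            int value = std::stoi(token); // throws std::invalid_argument on junk
            if (value < INT8_MIN || value > INT8_MAX)
            {
                throw std::out_of_range("value " + token + " does not fit in int8");
            }
            data.push_back(static_cast<int8_t>(value));
        }
        if (data.size() != numElements)
        {
            throw std::runtime_error("input data has too few values");
        }
        return data;
    }

In ExecuteNetwork this path is taken when the corresponding entry in
params.m_InputTypes compares equal to "qasymms8", matching the check
added in the diff above.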