Fix GitHub #557: wrong result in int8 model

 * Added support for qasymms8 (int8) to ImageTensorGenerator
 * Added qasymmu8 as alias for qasymm8 in ImageTensorGenerator
 * Added support for qasymms8 (int8) to ExecuteNetwork
 * Added qasymmu8 as alias for qasymm8 in ExecuteNetwork
 * Set tflite to be the default model format in ImageTensorGenerator as
   it's the only supported model format.
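
For reference, the new qasymms8 input path quantizes float values with the
standard signed asymmetric affine scheme (the armnn::Quantize<int8_t> call in
NetworkExecutionUtils below). A minimal standalone sketch of that mapping
follows; the helper name and the scale/offset values are illustrative only:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // QAsymmS8 affine quantization: q = clamp(round(v / scale) + offset, -128, 127)
    int8_t QuantizeToQAsymmS8(float value, float scale, int32_t offset)
    {
        int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<int8_t>(std::min(127, std::max(-128, q)));
    }

    // Example: with scale = 0.5f and offset = 10, QuantizeToQAsymmS8(2.0f, 0.5f, 10) == 14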

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index bce8358..a9b5a3c 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -155,7 +155,8 @@
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
-        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
+                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
 
@@ -175,6 +176,26 @@
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
+        else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
+            std::vector<int8_t> tensorData;
+            PopulateTensorWithDataGeneric<int8_t>(tensorData,
+                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                                  dataFile,
+                                                  [](const std::string& s)
+                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+
+            std::copy(tensorData.begin(), tensorData.end(), inputData);
+        }
         else
         {
             ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
@@ -245,7 +266,8 @@
                     printf("%d ", tfLiteDelageOutputData[i]);
                 }
             }
-            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
+            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 ||
+                     params.m_OutputTypes[outputIndex].compare("qasymmu8") == 0)
             {
                 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                 if(tfLiteDelageOutputData == NULL)
@@ -374,13 +396,17 @@
                 if (params.m_OutputTypes[i].compare("float") == 0)
                 {
                     outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
-                } else if (params.m_OutputTypes[i].compare("int") == 0)
+                }
+                else if (params.m_OutputTypes[i].compare("int") == 0)
                 {
                     outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
-                } else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
+                }
+                else if (params.m_OutputTypes[i].compare("qasymm8") == 0 ||
+                         params.m_OutputTypes[i].compare("qasymmu8") == 0)
                 {
                     outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
-                } else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
+                }
+                else if (params.m_OutputTypes[i].compare("qasymms8") == 0)
                 {
                     outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
                 } else
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 25ddecf..b12547f 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -232,7 +232,7 @@
                  cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
 
                 ("q,quantize-input",
-                 "If this option is enabled, all float inputs will be quantized to qasymm8. "
+                 "If this option is enabled, all float inputs will be quantized to the data type of the corresponding model input. "
                  "If unset, default to not quantized. Accepted values (true or false)",
                  cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
 
@@ -264,13 +264,13 @@
                 ("y,input-type",
                  "The type of the input tensors in the network separated by comma. "
                  "If unset, defaults to \"float\" for all defined inputs. "
-                 "Accepted values (float, int or qasymm8).",
+                 "Accepted values (float, int, qasymms8 or qasymmu8).",
                  cxxopts::value<std::string>())
 
                 ("z,output-type",
                  "The type of the output tensors in the network separated by comma. "
                  "If unset, defaults to \"float\" for all defined outputs. "
-                 "Accepted values (float, int or qasymm8).",
+                 "Accepted values (float, int, qasymms8 or qasymmu8).",
                  cxxopts::value<std::string>())
 
                 ("T,tflite-executor",
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index a2110f9..b443255 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -164,15 +164,16 @@
                 ("f,model-format",
                     "Format of the intended model file that uses the images."
                     "Different formats have different image normalization styles."
+                    "If unset, defaults to tflite. "
                     "Accepted value (tflite)",
-                    cxxopts::value<std::string>(m_ModelFormat))
+                    cxxopts::value<std::string>(m_ModelFormat)->default_value("tflite"))
                 ("o,outfile",
                     "Output raw tensor file path",
                     cxxopts::value<std::string>(m_OutputFileName))
                 ("z,output-type",
                     "The data type of the output tensors."
                     "If unset, defaults to \"float\" for all defined inputs. "
-                    "Accepted values (float, int or qasymm8)",
+                    "Accepted values (float, int, qasymms8 or qasymmu8)",
                     cxxopts::value<std::string>(m_OutputType)->default_value("float"))
                 ("new-width",
                     "Resize image to new width. Keep original width if unspecified",
@@ -254,10 +255,14 @@
         {
             return armnn::DataType::Signed32;
         }
-        else if (m_OutputType == "qasymm8")
+        else if (m_OutputType == "qasymm8" || m_OutputType == "qasymmu8")
         {
             return armnn::DataType::QAsymmU8;
         }
+        else if (m_OutputType == "qasymms8")
+        {
+            return armnn::DataType::QAsymmS8;
+        }
         else
         {
             throw armnn::Exception("Unsupported input type" + m_OutputType);
@@ -292,7 +297,8 @@
     const unsigned int batchSize = 1;
     const armnn::DataLayout outputLayout(cmdline.GetLayout());
 
-    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
+    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>,
+                                             std::vector<int8_t>>;
     std::vector<TContainer> imageDataContainers;
     const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
     try
@@ -307,6 +313,10 @@
                 imageDataContainers.push_back(PrepareImageTensor<uint8_t>(
                     imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
                 break;
+            case armnn::DataType::QAsymmS8:
+                imageDataContainers.push_back(PrepareImageTensor<int8_t>(
+                        imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
+                break;
             case armnn::DataType::Float32:
             default:
                 imageDataContainers.push_back(PrepareImageTensor<float>(
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index 5aa2ca8..6d2e549 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -56,6 +56,10 @@
                     normParams.mean = { 128.0, 128.0, 128.0 };
                     break;
                 case armnn::DataType::QAsymmU8:
+                    break;
+                case armnn::DataType::QAsymmS8:
+                    normParams.mean = { 128.0, 128.0, 128.0 };
+                    break;
                 default:
                     break;
             }
@@ -138,7 +142,7 @@
     return imageDataInt;
 }
 
-// Prepare qasymm8 image tensor
+// Prepare qasymmu8 image tensor
 template <>
 std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                  unsigned int newWidth,
@@ -158,6 +162,26 @@
     return imageDataQasymm8;
 }
 
+// Prepare qasymms8 image tensor
+template <>
+std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
+                                               unsigned int newWidth,
+                                               unsigned int newHeight,
+                                               const NormalizationParameters& normParams,
+                                               unsigned int batchSize,
+                                               const armnn::DataLayout& outputLayout)
+{
+    // Get float32 image tensor
+    std::vector<float> imageDataFloat =
+            PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
+    std::vector<int8_t> imageDataQasymms8;
+    imageDataQasymms8.reserve(imageDataFloat.size());
+    // Convert to int8 image tensor with static cast
+    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
+                   [](float val) { return static_cast<int8_t>(val); });
+    return imageDataQasymms8;
+}
+
 /** Write image tensor to ofstream
  *
  * @param[in] imageData         Image tensor data
@@ -176,3 +200,11 @@
 {
     std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
 }
+
+// For int8_t image tensor, cast it to int before writing it to prevent writing data as characters instead of
+// numerical values
+template <>
+void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
+{
+    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
+}
\ No newline at end of file
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 23b892f..0906c1c 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -40,6 +40,13 @@
 }
 
 template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
+{
+    return ParseArrayImpl<int8_t>(stream,
+                                  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
+template<>
 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
 {
     return ParseArrayImpl<uint8_t>(stream,
@@ -54,7 +61,20 @@
                                    [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
 }
 
-
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
+                                               const float& quantizationScale,
+                                               const int32_t& quantizationOffset)
+{
+    return ParseArrayImpl<int8_t>(stream,
+                                  [&quantizationScale, &quantizationOffset](const std::string& s)
+                                  {
+                                      return armnn::numeric_cast<int8_t>(
+                                              armnn::Quantize<int8_t>(std::stof(s),
+                                                                      quantizationScale,
+                                                                      quantizationOffset));
+                                  });
+}
 
 template<>
 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
@@ -232,12 +252,18 @@
                      ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
                      GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
     }
-    else if (dataTypeStr.compare("qasymm8") == 0)
+    else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
     {
         tensorData = readFromFile ?
                      ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
                      GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
     }
+    else if (dataTypeStr.compare("qasymms8") == 0)
+    {
+        tensorData = readFromFile ?
+                     ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
+                     GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
+    }
     else
     {
         std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;