GitHub #557: Fix wrong result in int8 model

 * Added support for qasymms8 (int8) to ImageTensorGenerator
 * Added qasymmu8 as alias for qasymm8 in ImageTensorGenerator
 * Added support for qasymms8 (int8) to ExecuteNetwork
 * Added qasymmu8 as alias for qasymm8 in ExecuteNetwork
 * Set tflite as the default model format in ImageTensorGenerator, since
   it is the only supported model format.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
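
Illustrative sketch (not part of the patch): the qasymms8 path reuses the
float32 pipeline and then narrows each value to int8_t, as in the
PrepareImageTensor<int8_t> specialization below. A self-contained example
with made-up pixel values, folding the mean subtraction (driven by
normParams in the patch) into a single step:

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    int main()
    {
        // Hypothetical float pixel data, values in [0, 255].
        std::vector<float> imageDataFloat = { 0.0f, 64.0f, 128.0f, 255.0f };

        // QAsymmS8: subtract the mean of 128 so values fit the signed
        // int8 range [-128, 127], then narrow with a static_cast.
        std::vector<int8_t> imageDataQasymms8;
        imageDataQasymms8.reserve(imageDataFloat.size());
        std::transform(imageDataFloat.begin(), imageDataFloat.end(),
                       std::back_inserter(imageDataQasymms8),
                       [](float val) { return static_cast<int8_t>(val - 128.0f); });
        // imageDataQasymms8 now holds { -128, -64, 0, 127 }.
        return 0;
    }
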
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index 5aa2ca8..6d2e549 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -56,6 +56,10 @@
                     normParams.mean = { 128.0, 128.0, 128.0 };
                     break;
                 case armnn::DataType::QAsymmU8:
+                    break;
+                case armnn::DataType::QAsymmS8:
+                    normParams.mean = { 128.0, 128.0, 128.0 };
+                    break;
                 default:
                     break;
             }
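
Note: the new QAsymmS8 case uses a mean of 128.0 per channel because
subtracting 128 maps raw pixel values 0..255 onto the signed int8 range
-128..127 (0 - 128 = -128, 255 - 128 = 127), whereas QAsymmU8 needs no
offset since uint8 already covers 0..255.
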
@@ -138,7 +142,7 @@
     return imageDataInt;
 }
 
-// Prepare qasymm8 image tensor
+// Prepare qasymmu8 image tensor
 template <>
 std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                  unsigned int newWidth,
@@ -158,6 +162,26 @@
     return imageDataQasymm8;
 }
 
+// Prepare qasymms8 image tensor
+template <>
+std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
+                                               unsigned int newWidth,
+                                               unsigned int newHeight,
+                                               const NormalizationParameters& normParams,
+                                               unsigned int batchSize,
+                                               const armnn::DataLayout& outputLayout)
+{
+    // Get float32 image tensor
+    std::vector<float> imageDataFloat =
+            PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
+    std::vector<int8_t> imageDataQasymms8;
+    imageDataQasymms8.reserve(imageDataFloat.size());
+    // Convert to int8 image tensor with static cast
+    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
+                   [](float val) { return static_cast<int8_t>(val); });
+    return imageDataQasymms8;
+}
+
 /** Write image tensor to ofstream
  *
  * @param[in] imageData         Image tensor data
@@ -176,3 +200,11 @@
 {
     std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
 }
+
+// For an int8_t image tensor, cast each value to int before writing so the data is written as numerical
+// values rather than as characters
+template <>
+void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
+{
+    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
+}
\ No newline at end of file
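
To illustrate why the specialization casts through int (a standalone
sketch, not part of the patch): streaming an int8_t directly writes it as
a character, while std::ostream_iterator<int> converts each value and
writes readable numbers:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <iterator>
    #include <vector>

    int main()
    {
        std::vector<int8_t> imageData = { -128, 0, 65, 127 };

        // Streaming int8_t directly treats each value as a character:
        // 65 comes out as 'A', not "65".
        std::copy(imageData.begin(), imageData.end(),
                  std::ostream_iterator<int8_t>(std::cout, " "));
        std::cout << '\n';

        // Casting to int, as the specialization above does, writes
        // numerical values: "-128 0 65 127".
        std::copy(imageData.begin(), imageData.end(),
                  std::ostream_iterator<int>(std::cout, " "));
        std::cout << '\n';
        return 0;
    }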