IVGCVSW-3129 Image pre-processing fix for TFLite

 * Resized images for quantized models are now statically cast to uint8
   instead of being quantized with the model's input scale and offset

 * Removed the optional quantization offset parameter from the
   ImagePreprocessor constructor; the scale parameter now acts as a
   normalization divisor (default 255.0f)

 * Changed the mean and scale values passed by the TFLite model tests

Signed-off-by: FinnWilliamsArm <Finn.Williams@arm.com>
Change-Id: Id5ffdf77f3614d10c417e769bd8ffc4a4c07308b
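
After this patch every test image goes through the same per-channel
normalization, ((l / scale) - mean[c]) / stddev[c], during the bilinear
resize; quantized models then receive the result cast straight to uint8.
A minimal sketch of that formula (the function name is illustrative,
not part of the test harness):

```cpp
#include <array>

// Per-channel normalization applied by ResizeBilinearAndNormalize
// after this patch. l is the bilinearly interpolated pixel value in
// [0, 255]; c selects the channel. NormalizePixel is a hypothetical
// name used here for illustration only.
float NormalizePixel(float l, unsigned int c,
                     float scale,
                     const std::array<float, 3>& mean,
                     const std::array<float, 3>& stddev)
{
    return ((l / scale) - mean[c]) / stddev[c];
}
```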
diff --git a/tests/ImagePreprocessor.cpp b/tests/ImagePreprocessor.cpp
index 0ef0fda..74bc943 100644
--- a/tests/ImagePreprocessor.cpp
+++ b/tests/ImagePreprocessor.cpp
@@ -31,7 +31,7 @@
 
     result = image.Resize(m_Width, m_Height, CHECK_LOCATION(),
                           InferenceTestImage::ResizingMethods::BilinearAndNormalized,
-                          m_Mean, m_Stddev);
+                          m_Mean, m_Stddev, m_Scale);
 
     // duplicate data across the batch
     for (unsigned int i = 1; i < m_BatchSize; i++)
@@ -72,9 +72,8 @@
 
     for (size_t i=0; i<resizedSize; ++i)
     {
-        quantized[i] = armnn::Quantize<uint8_t>(resized[i],
-                                                m_Scale,
-                                                m_Offset);
+        quantized[i] = static_cast<uint8_t>(resized[i]);
     }
+
     return std::make_unique<TTestCaseData>(label, std::move(quantized));
 }
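
The behavioural difference in the uint8 path is worth spelling out:
armnn::Quantize<uint8_t> rounds value / scale, adds the offset, and
clamps, whereas the new code casts directly, which truncates. A
self-contained sketch of the two conversions (QuantizeOld only
approximates armnn::Quantize; the exact clamping details may differ):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Approximation of the old path, armnn::Quantize<uint8_t>:
// round(value / scale) + offset, clamped to the uint8 range.
uint8_t QuantizeOld(float value, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

// New path: the resized value is assumed to already be a raw pixel in
// [0, 255] (scale = 1, zero mean, unit stddev), so a truncating cast
// is enough.
uint8_t CastNew(float value)
{
    return static_cast<uint8_t>(value);
}
```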
diff --git a/tests/ImagePreprocessor.hpp b/tests/ImagePreprocessor.hpp
index d77113c..cd58681 100644
--- a/tests/ImagePreprocessor.hpp
+++ b/tests/ImagePreprocessor.hpp
@@ -33,8 +33,7 @@
         unsigned int width,
         unsigned int height,
         const std::vector<ImageSet>& imageSet,
-        float scale=1.0,
-        int32_t offset=0,
+        float scale=255.0f,
         const std::array<float, 3> mean={{0, 0, 0}},
         const std::array<float, 3> stddev={{1, 1, 1}},
         DataFormat dataFormat=DataFormat::NHWC,
@@ -44,7 +43,6 @@
     , m_Width(width)
     , m_BatchSize(batchSize)
     , m_Scale(scale)
-    , m_Offset(offset)
     , m_ImageSet(imageSet)
     , m_Mean(mean)
     , m_Stddev(stddev)
@@ -66,7 +64,6 @@
     unsigned int m_BatchSize;
     // Quantization parameters
     float m_Scale;
-    int32_t m_Offset;
     const std::vector<ImageSet> m_ImageSet;
 
     const std::array<float, 3> m_Mean;
diff --git a/tests/InferenceTestImage.cpp b/tests/InferenceTestImage.cpp
index b011e6a..92c67ae 100644
--- a/tests/InferenceTestImage.cpp
+++ b/tests/InferenceTestImage.cpp
@@ -55,6 +55,7 @@
 std::vector<float> ResizeBilinearAndNormalize(const InferenceTestImage & image,
                                               const unsigned int outputWidth,
                                               const unsigned int outputHeight,
+                                              const float scale,
                                               const std::array<float, 3>& mean,
                                               const std::array<float, 3>& stddev)
 {
@@ -114,7 +115,7 @@
                 const float ly0 = Lerp(float(rgb_x0y0[c]), float(rgb_x1y0[c]), xw);
                 const float ly1 = Lerp(float(rgb_x0y1[c]), float(rgb_x1y1[c]), xw);
                 const float l = Lerp(ly0, ly1, yw);
-                PutData(out, outputWidth, x, y, c, ((l/255.0f) - mean[c])/stddev[c]);
+                PutData(out, outputWidth, x, y, c, ((l / scale) - mean[c]) / stddev[c]);
             }
         }
     }
@@ -210,7 +211,8 @@
                                               const armnn::CheckLocation& location,
                                               const ResizingMethods meth,
                                               const std::array<float, 3>& mean,
-                                              const std::array<float, 3>& stddev)
+                                              const std::array<float, 3>& stddev,
+                                              const float scale)
 {
     std::vector<float> out;
     if (newWidth == 0 || newHeight == 0)
@@ -227,7 +229,7 @@
         }
         case ResizingMethods::BilinearAndNormalized:
         {
-            out = ResizeBilinearAndNormalize(*this, newWidth, newHeight, mean, stddev);
+            out = ResizeBilinearAndNormalize(*this, newWidth, newHeight, scale, mean, stddev);
             break;
         }
         default:
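
To see the new scale parameter in action, the snippet below evaluates
the PutData expression with the OnnxMobileNet red-channel constants
from this patch (scale = 255, mean = 0.485, stddev = 0.229):

```cpp
#include <cstdio>

int main()
{
    const float scale = 255.0f, mean = 0.485f, stddev = 0.229f;
    const float l = 128.0f;   // bilinearly interpolated pixel value
    const float out = ((l / scale) - mean) / stddev;
    std::printf("%f\n", out); // ~0.0741
}
```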
diff --git a/tests/InferenceTestImage.hpp b/tests/InferenceTestImage.hpp
index 59f4cc7..643d060 100644
--- a/tests/InferenceTestImage.hpp
+++ b/tests/InferenceTestImage.hpp
@@ -92,7 +92,8 @@
                               const armnn::CheckLocation& location,
                               const ResizingMethods meth = ResizingMethods::STB,
                               const std::array<float, 3>& mean = {{0.0, 0.0, 0.0}},
-                              const std::array<float, 3>& stddev = {{1.0, 1.0, 1.0}});
+                              const std::array<float, 3>& stddev = {{1.0, 1.0, 1.0}},
+                              const float scale = 255.0f);
 
     void Write(WriteFormat format, const char* filePath) const;
 
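
Because scale is defaulted, existing call sites keep compiling
unchanged; only callers that need a different divisor pass it
explicitly. Illustrative call sites, assuming image is a loaded
InferenceTestImage, CHECK_LOCATION() comes from the Arm NN test
utilities, and Resize returns the normalized float buffer as it does
in ImagePreprocessor.cpp:

```cpp
// Default scale = 255.0f: pixels are mapped into [0, 1] before the
// mean/stddev normalization (ImageNet-style float models).
std::vector<float> out = image.Resize(
    224, 224, CHECK_LOCATION(),
    InferenceTestImage::ResizingMethods::BilinearAndNormalized,
    {{0.485f, 0.456f, 0.406f}},   // mean
    {{0.229f, 0.224f, 0.225f}});  // stddev

// Explicit scale, as used by the TFLite float models in this patch.
std::vector<float> out2 = image.Resize(
    299, 299, CHECK_LOCATION(),
    InferenceTestImage::ResizingMethods::BilinearAndNormalized,
    {{0.5f, 0.5f, 0.5f}},         // mean
    {{1.0f, 1.0f, 1.0f}},         // stddev
    127.5f);                      // scale
```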
diff --git a/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp b/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
index 79ee49e..8786fea 100644
--- a/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
+++ b/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
@@ -40,8 +40,7 @@
                              224,
                              224,
                              imageSet,
-                             1.0,                             // scale
-                             0,                               // offset
+                             255.0,                           // scale
                              {{0.485f, 0.456f, 0.406f}},      // mean
                              {{0.229f, 0.224f, 0.225f}},      // stddev
                              DatabaseType::DataFormat::NCHW); // format
diff --git a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
index 4fa0e14..bf5a865 100644
--- a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
@@ -40,14 +40,12 @@
                      [&imageSet](const char* dataDir, const ModelType & model) {
                          // we need to get the input quantization parameters from
                          // the parsed model
-                         auto inputBinding = model.GetInputBindingInfo();
                          return DatabaseType(
                              dataDir,
                              299,
                              299,
                              imageSet,
-                             inputBinding.second.GetQuantizationScale(),
-                             inputBinding.second.GetQuantizationOffset());
+                             1);
                      },
                      &inputTensorShape);
     }
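
With the updated constructor, the quantized tests pass scale = 1 and
fall back to the default zero mean and unit stddev, so the
normalization collapses to the identity and the later cast forwards
raw pixel bytes to the model. A minimal check of that effective
pipeline (the function name is hypothetical):

```cpp
#include <cstdint>

// Effective per-pixel pipeline for the quantized TFLite tests after
// this patch: scale = 1, mean = 0, stddev = 1, then static_cast.
uint8_t EffectiveQuantizedInput(float l) // l in [0, 255]
{
    const float normalized = ((l / 1.0f) - 0.0f) / 1.0f; // identity
    return static_cast<uint8_t>(normalized);
}
```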
diff --git a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
index bcb9db8..b0af830 100644
--- a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
@@ -40,14 +40,12 @@
                      [&imageSet](const char* dataDir, const ModelType & model) {
                          // we need to get the input quantization parameters from
                          // the parsed model
-                         auto inputBinding = model.GetInputBindingInfo();
                          return DatabaseType(
                              dataDir,
                              299,
                              299,
                              imageSet,
-                             inputBinding.second.GetQuantizationScale(),
-                             inputBinding.second.GetQuantizationOffset());
+                             1);
                      },
                      &inputTensorShape);
     }
diff --git a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
index c676cd7..4cf16d7 100644
--- a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
+++ b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
@@ -42,7 +42,9 @@
                              dataDir,
                              224,
                              224,
-                             imageSet);
+                             imageSet,
+                             127.5f,
+                             {{0.5f, 0.5f, 0.5f}});
                      },
                      &inputTensorShape);
     }
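
As a quick sanity check on the float-model constants used here
(scale = 127.5, mean = 0.5, default unit stddev), the sketch below
evaluates the normalization at the ends and midpoint of the 0-255
pixel range; inputs end up spanning [-0.5, 1.5]:

```cpp
#include <cstdio>
#include <initializer_list>

int main()
{
    const float scale = 127.5f, mean = 0.5f, stddev = 1.0f;
    for (float l : {0.0f, 127.5f, 255.0f})
    {
        std::printf("%5.1f -> %+.2f\n", l, ((l / scale) - mean) / stddev);
    }
    // 0.0 -> -0.50, 127.5 -> +0.50, 255.0 -> +1.50
}
```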
diff --git a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
index 4d99e9e..7082849 100644
--- a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
+++ b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
@@ -40,14 +40,12 @@
                 [&imageSet](const char* dataDir, const ModelType & model) {
                     // we need to get the input quantization parameters from
                     // the parsed model
-                    auto inputBinding = model.GetInputBindingInfo();
                     return DatabaseType(
                             dataDir,
                             128,
                             128,
                             imageSet,
-                            inputBinding.second.GetQuantizationScale(),
-                            inputBinding.second.GetQuantizationOffset(),
+                            1,
                             {{0, 0, 0}},
                             {{1, 1, 1}},
                             DatabaseType::DataFormat::NCHW,
diff --git a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
index 220964d..1b411f9 100644
--- a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
@@ -108,14 +108,12 @@
                      [&imageSet](const char* dataDir, const ModelType & model) {
                          // we need to get the input quantization parameters from
                          // the parsed model
-                         auto inputBinding = model.GetInputBindingInfo();
                          return DatabaseType(
                              dataDir,
                              224,
                              224,
                              imageSet,
-                             inputBinding.second.GetQuantizationScale(),
-                             inputBinding.second.GetQuantizationOffset());
+                             1);
                      },
                      &inputTensorShape);
     }
diff --git a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
index 5db5c24..9bc1034 100644
--- a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
@@ -40,14 +40,12 @@
                      [&imageSet](const char* dataDir, const ModelType & model) {
                          // we need to get the input quantization parameters from
                          // the parsed model
-                         auto inputBinding = model.GetInputBindingInfo();
                          return DatabaseType(
                              dataDir,
                              224,
                              224,
                              imageSet,
-                             inputBinding.second.GetQuantizationScale(),
-                             inputBinding.second.GetQuantizationOffset());
+                             1);
                      },
                      &inputTensorShape);
     }
diff --git a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
index 48e6321..98235e3 100644
--- a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
+++ b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
@@ -40,14 +40,12 @@
                      [&imageSet](const char* dataDir, const ModelType & model) {
                          // we need to get the input quantization parameters from
                          // the parsed model
-                         auto inputBinding = model.GetInputBindingInfo();
                          return DatabaseType(
                              dataDir,
                              224,
                              224,
                              imageSet,
-                             inputBinding.second.GetQuantizationScale(),
-                             inputBinding.second.GetQuantizationOffset());
+                             1);
                      },
                      &inputTensorShape);
     }
diff --git a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
index 36fc72c..1e2ffbf 100644
--- a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
+++ b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
@@ -28,7 +28,6 @@
         using DatabaseType = ImagePreprocessor<DataType>;
         using ParserType = armnnTfLiteParser::ITfLiteParser;
         using ModelType = InferenceModel<ParserType, DataType>;
-
         // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
         retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
                                                           ParserType>(
@@ -43,7 +42,9 @@
                              dataDir,
                              299,
                              299,
-                             imageSet);
+                             imageSet,
+                             127.5f,
+                             {{0.5f, 0.5f, 0.5f}});
                      },
                      &inputTensorShape);
     }
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index 0ba1e5d..2084d2d 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -42,14 +42,12 @@
                      [&imageSet](const char* dataDir, const ModelType & model) {
                          // we need to get the input quantization parameters from
                          // the parsed model
-                         auto inputBinding = model.GetInputBindingInfo();
                          return DatabaseType(
                              dataDir,
                              224,
                              224,
                              imageSet,
-                             inputBinding.second.GetQuantizationScale(),
-                             inputBinding.second.GetQuantizationOffset(),
+                             1,
                              {{0, 0, 0}},
                              {{1, 1, 1}},
                              DatabaseType::DataFormat::NCHW,