IVGCVSW-2436 Modify MobileNet SSD inference test

 * change MobileNet SSD input to uint8
 * get quantization scale and offset from the model
 * change data layout to NHWC to match the TensorFlow Lite layout
 * update expected output to match TfLite results with quantized data

Change-Id: I07104d56286893935779169356234de53f1c9492
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index 0091009..10ee1dc 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -126,7 +126,7 @@
     }
 
 private:
-    static constexpr unsigned int k_NumDetections = 10u;
+    static constexpr unsigned int k_NumDetections = 1u;
 
     static constexpr unsigned int k_OutputSize1 = k_NumDetections * 4u;
     static constexpr unsigned int k_OutputSize2 = k_NumDetections;
@@ -169,8 +169,8 @@
         {
             return false;
         }
-
-        m_Database = std::make_unique<MobileNetSsdDatabase>(m_DataDir.c_str());
+        std::pair<float, int32_t> qParams = m_Model->GetQuantizationParams();
+        m_Database = std::make_unique<MobileNetSsdDatabase>(m_DataDir.c_str(), qParams.first, qParams.second);
         if (!m_Database)
         {
             return false;