MLECO-2458 and MLECO-2476 [Fix] VWW IFM quant step

* Changed image->cc conversion to be consistent with the preprocessing
  of the img_class and vww models: images are scaled maintaining the
  aspect ratio and then a centre crop of the correct size
  is taken.
* VWW now applies the model's input quantization parameters to the
  image (first converted to the [0,1] float range) to produce the
  int8 IFM; both steps are sketched below.
* Changed adult_blur to an image without a person.
* Fixed the menu printout when selecting a specific ifm to run
  (the select message was only displayed after typing something).
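
A rough sketch of the two preprocessing steps above (the helper
names and the QuantParams struct are illustrative only, not the
actual application API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    struct QuantParams { float scale; int zeroPoint; };  /* from the model's input tensor */

    /* Centre-crop offsets after an aspect-ratio preserving resize:
     * the image is scaled so its shorter side equals the model input
     * side, then a modelSide x modelSide window is taken from the
     * centre of the scaled image. */
    static void CentreCropOffsets(int scaledW, int scaledH, int modelSide,
                                  int& x0, int& y0)
    {
        x0 = (scaledW - modelSide) / 2;
        y0 = (scaledH - modelSide) / 2;
    }

    /* Quantize pixels already normalised to [0,1] floats into the
     * int8 range expected by the VWW model's IFM, using the input
     * tensor's scale and zero point. */
    static std::vector<int8_t> QuantizeIfm(const std::vector<float>& normPixels,
                                           QuantParams q)
    {
        std::vector<int8_t> ifm(normPixels.size());
        for (size_t i = 0; i < normPixels.size(); ++i) {
            int32_t v = static_cast<int32_t>(std::round(normPixels[i] / q.scale)) + q.zeroPoint;
            ifm[i] = static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, v)));
        }
        return ifm;
    }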

Change-Id: Ie6cde7ab4835ea842667b87397458a5d32131df3
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
index d5e21c2..ad785e8 100644
--- a/tests/use_case/ad/InferenceTestAD.cc
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -64,7 +64,7 @@
 template <typename T>
 void TestInference(const T *input_goldenFV, const T *output_goldenFV, arm::app::Model& model)
 {
-    REQUIRE(RunInference(model, (int8_t*)input_goldenFV));
+    REQUIRE(RunInference(model, static_cast<const T*>(input_goldenFV)));
 
     TfLiteTensor *outputTensor = model.GetOutputTensor(0);
 
@@ -75,7 +75,7 @@
 
     for (size_t i = 0; i < outputTensor->bytes; i++)
     {
-        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+        REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
     }
 }
 
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
index d5e6c35..1f9cb80 100644
--- a/tests/use_case/asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -81,7 +81,7 @@
     REQUIRE(tensorData);
 
     for (size_t i = 0; i < outputTensor->bytes; i++) {
-        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+        REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
     }
 }
 
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index 6fbf374..bb89c99 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -56,7 +56,7 @@
     REQUIRE(tensorData);
 
     for (size_t i = 0; i < outputTensor->bytes; i++) {
-        REQUIRE((int)tensorData[i] == Approx((int)((T)goldenFV[i])).epsilon(tolerance));
+        REQUIRE(static_cast<int>(tensorData[i]) == Approx(static_cast<int>((T)goldenFV[i])).epsilon(tolerance));
     }
 }
 
diff --git a/tests/use_case/kws/InferenceTestDSCNN.cc b/tests/use_case/kws/InferenceTestDSCNN.cc
index d02e33c..7ce55dd 100644
--- a/tests/use_case/kws/InferenceTestDSCNN.cc
+++ b/tests/use_case/kws/InferenceTestDSCNN.cc
@@ -70,7 +70,7 @@
     REQUIRE(tensorData);
 
     for (size_t i = 0; i < outputTensor->bytes; i++) {
-        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+        REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
     }
 }
 
diff --git a/tests/use_case/kws_asr/InferenceTestDSCNN.cc b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
index e210c33..134003d 100644
--- a/tests/use_case/kws_asr/InferenceTestDSCNN.cc
+++ b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
@@ -68,7 +68,7 @@
     REQUIRE(tensorData);
 
     for (size_t i = 0; i < outputTensor->bytes; i++) {
-        REQUIRE((int) tensorData[i] == (int) ((T) output_goldenFV[i]));
+        REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>((T) output_goldenFV[i]));
     }
 }
 
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 5f5ad98..1b14a42 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -83,7 +83,7 @@
     REQUIRE(tensorData);
 
     for (size_t i = 0; i < outputTensor->bytes; i++) {
-        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+        REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
     }
 }
 
diff --git a/tests/use_case/vww/VisualWakeWordUCTests.cc b/tests/use_case/vww/VisualWakeWordUCTests.cc
index 891423b..700a6bb 100644
--- a/tests/use_case/vww/VisualWakeWordUCTests.cc
+++ b/tests/use_case/vww/VisualWakeWordUCTests.cc
@@ -73,7 +73,7 @@
 
     auto results = caseContext.Get<std::vector<arm::app::ClassificationResult>>("results");
 
-    REQUIRE(results[0].m_labelIdx == 0);
+    REQUIRE(results[0].m_labelIdx == 1);
 }
 
 TEST_CASE("Inference run all images")
diff --git a/tests/utils/ImageUtils.cc b/tests/utils/ImageUtils.cc
index f77ce1e..506040f 100644
--- a/tests/utils/ImageUtils.cc
+++ b/tests/utils/ImageUtils.cc
@@ -18,12 +18,12 @@
 
 void convertImgIoInt8(void * data, const size_t sz)
 {
-    uint8_t * tmp_req_data          = (uint8_t *)data;
-    int8_t * tmp_signed_req_data    = (int8_t *) data;
+    uint8_t * tmp_req_data          = static_cast<uint8_t *>(data);
+    int8_t * tmp_signed_req_data    = static_cast<int8_t *>(data);
 
     for (size_t i = 0; i < sz; ++i) {
-        tmp_signed_req_data[i] = (int8_t)(
-                (int32_t)(tmp_req_data[i]) - 128);
+        tmp_signed_req_data[i] = static_cast<int8_t>(
+                static_cast<int32_t>(tmp_req_data[i]) - 128);
     }
 }