MLECO-1904: Update to use latest TFLu

* Now uses separate TFLu github repo
* Fixes to align with API changes
* Update ASR model ops and re-enable ASR inference tests
* Set default release level to release_with_logs

Signed-off-by: Richard Burton <richard.burton@arm.com>

Change-Id: I57612088985dece1413c5c00a6e442381e07dd91
diff --git a/tests/common/ClassifierTests.cc b/tests/common/ClassifierTests.cc
index a04e4c2..d950304 100644
--- a/tests/common/ClassifierTests.cc
+++ b/tests/common/ClassifierTests.cc
@@ -21,7 +21,7 @@
 
 template<typename T>
 void test_classifier_result(std::vector<std::pair<uint32_t, T>>& selectedResults, T defaultTensorValue) {
-    const int dimArray[] = {1, 1001};
+    int dimArray[] = {1, 1001};
     std::vector <std::string> labels(1001);
     std::vector<T> outputVec(1001, defaultTensorValue);
     TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
diff --git a/tests/use_case/asr/AsrClassifierTests.cc b/tests/use_case/asr/AsrClassifierTests.cc
index 12523aa..e2bfb18 100644
--- a/tests/use_case/asr/AsrClassifierTests.cc
+++ b/tests/use_case/asr/AsrClassifierTests.cc
@@ -30,7 +30,7 @@
 
 
 TEST_CASE("Test valid classifier UINT8") {
-    const int dimArray[] = {4, 1, 1, 246, 29};
+    int dimArray[] = {4, 1, 1, 246, 29};
     std::vector <std::string> labels(29);
     std::vector <uint8_t> outputVec(7134);
     TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
@@ -46,7 +46,7 @@
 
 
 TEST_CASE("Get classification results") {
-    const int dimArray[] = {4, 1, 1, 10, 15};
+    int dimArray[] = {4, 1, 1, 10, 15};
     std::vector <std::string> labels(15);
     std::vector<uint8_t> outputVec(150, static_cast<uint8_t>(1));
     TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
index 0943db8..d5e6c35 100644
--- a/tests/use_case/asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -54,8 +54,7 @@
     return true;
 }
 
-/* Skip this test, Wav2LetterModel if not Vela optimized but only from ML-zoo will fail. */
-TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter]")
 {
     arm::app::Wav2LetterModel model{};
 
@@ -86,7 +85,7 @@
     }
 }
 
-TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
 {
     for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
         auto input_goldenFV = get_ifm_data_array(i);;
diff --git a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
index 1391011..8af9014 100644
--- a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
+++ b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
@@ -108,7 +108,7 @@
     /* Constants. */
     const uint32_t  windowLen       = 512;
     const uint32_t  windowStride    = 160;
-    const int       dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
+    int             dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
     const float     quantScale      = 0.1410219967365265;
     const int       quantOffset     = -11;
 
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index b2720a8..6fbf374 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -24,7 +24,7 @@
 
 using namespace test;
 
-bool RunInference(arm::app::Model& model, const uint8_t imageData[])
+bool RunInference(arm::app::Model& model, const int8_t imageData[])
 {
     TfLiteTensor* inputTensor = model.GetInputTensor(0);
     REQUIRE(inputTensor);
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 897ad0a..5f5ad98 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -55,8 +55,7 @@
     return true;
 }
 
-/* Skip this test, Wav2LetterModel if not Vela optimized but only from ML-zoo will fail. */
-TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
 {
     arm::app::Wav2LetterModel model{};
 
@@ -88,7 +87,7 @@
     }
 }
 
-TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
 {
     for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
         auto input_goldenFV = get_ifm_data_array(i);;
diff --git a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
index e71366a..16dbea2 100644
--- a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
+++ b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
@@ -108,7 +108,7 @@
     /* Constants. */
     const uint32_t  windowLen       = 512;
     const uint32_t  windowStride    = 160;
-    const int       dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
+    int             dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
     const float     quantScale      = 0.1410219967365265;
     const int       quantOffset     = -11;