Opensource ML embedded evaluation kit

Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
diff --git a/tests/use_case/asr/AsrClassifierTests.cc b/tests/use_case/asr/AsrClassifierTests.cc
new file mode 100644
index 0000000..7c71912
--- /dev/null
+++ b/tests/use_case/asr/AsrClassifierTests.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AsrClassifier.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <catch.hpp>
+
+TEST_CASE("Test invalid classifier")
+{
+    TfLiteTensor* outputTens = nullptr;
+    std::vector <arm::app::ClassificationResult> resultVec;
+    arm::app::AsrClassifier classifier;
+
+    REQUIRE(!classifier.GetClassificationResults(outputTens, resultVec, {}, 1));
+}
+
+
+TEST_CASE("Test valid classifier UINT8") {
+    const int dimArray[] = {4, 1, 1, 246, 29};
+    std::vector <std::string> labels(29);
+    std::vector <uint8_t> outputVec(7134);
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+                                outputVec.data(), dims, 1, 0, "test");
+    TfLiteTensor* outputTensor = &tfTensor;
+    std::vector <arm::app::ClassificationResult> resultVec;
+    arm::app::AsrClassifier classifier;
+
+    REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 1));
+    REQUIRE(246 == resultVec.size());
+}
+
+
+TEST_CASE("Get classification results") {
+    const int dimArray[] = {4, 1, 1, 10, 15};
+    std::vector <std::string> labels(15);
+    std::vector<uint8_t> outputVec(150, static_cast<uint8_t>(1));
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+                                outputVec.data(), dims, 1, 0, "test");
+    TfLiteTensor* outputTensor = &tfTensor;
+
+    std::vector <arm::app::ClassificationResult> resultVec(10);
+
+    /* Set selected (row, column) values; the largest value in each row becomes the expected top classification for that row: */
+    std::vector<std::pair<uint32_t, std::pair<uint32_t, uint8_t>>> selectedResults {
+        {0, {3, 23}},
+        {0, {9, 15}},
+        {1, {5, 24}},
+        {1, {7, 4}},
+        {2, {9, 5}},
+        {3, {8, 6}},
+        {4, {13, 10}},
+        {4, {6, 18}},
+        {5, {3, 15}},
+        {5, {4, 115}},
+        {6, {6, 25}},
+        {7, {1, 7}},
+        {8, {11, 9}},
+        {9, {1, 10}}
+    };
+
+    const uint32_t nCols = outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx];
+    for (size_t i = 0; i < selectedResults.size(); ++i) {
+        uint32_t rIndex = selectedResults[i].first;
+        uint32_t cIndex = selectedResults[i].second.first;
+        uint8_t   value = selectedResults[i].second.second;
+        outputVec[rIndex * nCols + cIndex] = value;
+    }
+
+    arm::app::AsrClassifier classifier;
+
+    REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 1));
+    REQUIRE(resultVec[0].m_labelIdx == 3);
+    REQUIRE(resultVec[1].m_labelIdx == 5);
+    REQUIRE(resultVec[2].m_labelIdx == 9);
+    REQUIRE(resultVec[3].m_labelIdx == 8);
+    REQUIRE(resultVec[4].m_labelIdx == 6);
+    REQUIRE(resultVec[5].m_labelIdx == 4);
+    REQUIRE(resultVec[6].m_labelIdx == 6);
+    REQUIRE(resultVec[7].m_labelIdx == 1);
+    REQUIRE(resultVec[8].m_labelIdx == 11);
+    REQUIRE(resultVec[9].m_labelIdx == 1);
+}
diff --git a/tests/use_case/asr/AsrFeaturesTests.cc b/tests/use_case/asr/AsrFeaturesTests.cc
new file mode 100644
index 0000000..9401f40
--- /dev/null
+++ b/tests/use_case/asr/AsrFeaturesTests.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DataStructures.hpp"
+#include "AsrGoldenFeatures.hpp"
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterPreprocess.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+class TestPreprocess : public arm::app::audio::asr::Preprocess {
+public:
+    TestPreprocess()
+    : arm::app::audio::asr::Preprocess(0,0,0,0)
+    {}
+
+    bool ComputeDeltas(arm::app::Array2d<float>& mfcc,
+                       arm::app::Array2d<float>& delta1,
+                       arm::app::Array2d<float>& delta2)
+    {
+        return this->_ComputeDeltas(mfcc, delta1, delta2);
+    }
+
+    float GetMean(arm::app::Array2d<float>& vec)
+    {
+        return this->_GetMean(vec);
+    }
+
+    float GetStdDev(arm::app::Array2d<float>& vec, const float mean)
+    {
+       return this->_GetStdDev(vec, mean);
+    }
+
+    void NormaliseVec(arm::app::Array2d<float>& vec)
+    {
+        return this->_NormaliseVec(vec);
+    }
+};
+
+template<class T>
+void CheckOutputs(const std::vector<T> goldenOutput, std::vector<T> output)
+{
+    const size_t goldenSize = goldenOutput.size();
+    const size_t realSize = output.size();
+
+    REQUIRE(realSize == goldenSize);
+    REQUIRE_THAT(output, Catch::Approx( goldenOutput ).margin(0.0001));
+}
+template void CheckOutputs<float>(const std::vector<float> goldenOutput, std::vector<float> output);
+
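+/* Unflatten a time-major golden feature array into a [feature][time] vector of vectors. */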
+void populateBuffer(const float* input, size_t size, size_t numMfccFeats, std::vector<std::vector<float>>& buf)
+{
+    size_t time = 0;
+    for (size_t i = 0; i < size; ++i) {
+        if (i > 0 && i % numMfccFeats == 0) {
+            ++time;
+        }
+        float featureValue = *(input + i);
+        buf[i % numMfccFeats][time] = featureValue;
+    }
+}
+
+void populateArray2dWithVectorOfVector(std::vector<std::vector<float>> vec, arm::app::Array2d<float>& buf)
+{
+    for (size_t i = 0; i < vec.size(); ++i) {
+        for (size_t j = 0; j < vec[i].size(); ++j) {
+            buf(i, j) = vec[i][j];
+        }
+    }
+}
+
+TEST_CASE("Floating point asr features calculation", "[ASR]")
+{
+    TestPreprocess tp;
+
+    SECTION("First and second diff")
+    {
+        constexpr uint32_t numMfccFeats = 13;
+        constexpr uint32_t numFeatVectors = 296;
+
+        arm::app::Array2d<float> mfccBuf(numMfccFeats, numFeatVectors);
+        arm::app::Array2d<float> delta1Buf(numMfccFeats, numFeatVectors);
+        arm::app::Array2d<float> delta2Buf(numMfccFeats, numFeatVectors);
+
+        std::vector<std::vector<float>> goldenMfccBuf(numMfccFeats, std::vector<float>(numFeatVectors));
+        std::vector<std::vector<float>> goldenDelta1Buf(numMfccFeats, std::vector<float>(numFeatVectors));
+        std::vector<std::vector<float>> goldenDelta2Buf(numMfccFeats, std::vector<float>(numFeatVectors));
+
+        populateBuffer(golden_asr_mfcc, golden_asr_mfcc_len, numMfccFeats, goldenMfccBuf);
+        populateBuffer(golden_diff1_features, golden_diff1_len, numMfccFeats, goldenDelta1Buf);
+        populateBuffer(golden_diff2_features, golden_diff2_len, numMfccFeats, goldenDelta2Buf);
+
+        populateArray2dWithVectorOfVector(goldenMfccBuf, mfccBuf);
+        std::fill(delta1Buf.begin(), delta1Buf.end(), 0.f);
+        std::fill(delta2Buf.begin(), delta2Buf.end(), 0.f);
+
+        tp.ComputeDeltas(mfccBuf, delta1Buf, delta2Buf);
+
+        /* The first 4 and last 4 values differ because padding is applied AFTER the diffs are calculated. */
+        for (size_t i = 0; i < numMfccFeats; ++i) {
+            const float* start_goldenDelta1Buf = goldenDelta1Buf[i].data() + 4;
+            const float* start_delta1 = delta1Buf.begin() + i * delta1Buf.size(1) + 4;
+            std::vector<float> goldenDataDelta1(start_goldenDelta1Buf, start_goldenDelta1Buf + numFeatVectors - 8);
+            std::vector<float> tensorDataDelta1(start_delta1, start_delta1 + numFeatVectors - 8);
+
+            CheckOutputs<float>(goldenDataDelta1,tensorDataDelta1);
+
+            const float* start_goldenDelta2Buf = goldenDelta2Buf[i].data() + 4;
+            const float* start_delta2 = delta2Buf.begin() + i * delta2Buf.size(1) + 4;
+            std::vector<float> goldenDataDelta2(start_goldenDelta2Buf, start_goldenDelta2Buf + numFeatVectors - 8);
+            std::vector<float> tensorDataDelta2(start_delta2, start_delta2 + numFeatVectors - 8);
+
+            CheckOutputs<float>(goldenDataDelta2,tensorDataDelta2);
+        }
+
+    }
+
+    SECTION("Mean")
+    {
+        std::vector<std::vector<float>> mean1vec{{1, 2},
+                                                {-1, -2}};
+        arm::app::Array2d<float> mean1(2,2); /* {{1, 2},{-1, -2}} */
+        populateArray2dWithVectorOfVector(mean1vec, mean1);
+        REQUIRE(0 == Approx(tp.GetMean(mean1)));
+
+        arm::app::Array2d<float> mean2(2, 2);
+        std::fill(mean2.begin(), mean2.end(), 0.f);
+        REQUIRE(0 == Approx(tp.GetMean(mean2)));
+
+        arm::app::Array2d<float> mean3(3,3);
+        std::fill(mean3.begin(), mean3.end(), 1.f);
+        REQUIRE(1 == Approx(tp.GetMean(mean3)));
+    }
+
+    SECTION("Std")
+    {
+        arm::app::Array2d<float> std1(2, 2);
+        std::fill(std1.begin(), std1.end(), 0.f); /* {{0, 0}, {0, 0}} */
+        REQUIRE(0 == Approx(tp.GetStdDev(std1, 0)));
+
+        std::vector<std::vector<float>> std2vec{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}};
+        arm::app::Array2d<float> std2(2,5);
+        populateArray2dWithVectorOfVector(std2vec, std2);
+        const float mean = tp.GetMean(std2);
+        REQUIRE(2.872281323 == Approx(tp.GetStdDev(std2, mean)));
+
+        arm::app::Array2d<float> std3(2,2);
+        std::fill(std3.begin(), std3.end(), 1.f); /* std3{{1, 1}, {1, 1}}; */
+        REQUIRE(0 == Approx(tp.GetStdDev(std3, 1)));
+    }
+
+    SECTION("Norm") {
+        auto checker = [&](arm::app::Array2d<float>& d, std::vector<float>& g) {
+            tp.NormaliseVec(d);
+            std::vector<float> d_vec(d.begin(), d.end());
+            REQUIRE_THAT(g, Catch::Approx(d_vec));
+        };
+
+        std::vector<std::vector<float>> norm0vec{{1, 1}, {1, 1}};
+        std::vector<float> goldenNorm0 {0, 0, 0, 0};
+        arm::app::Array2d<float> norm0(2, 2);
+        populateArray2dWithVectorOfVector(norm0vec, norm0);
+        checker(norm0, goldenNorm0);
+
+        std::vector<std::vector<float>> norm1vec{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}};
+        std::vector<float> goldenNorm1 {
+            -1.218543592, -0.87038828, -0.522232968, -0.174077656, 0.174077656,
+             0.522232968,  0.87038828,  1.218543592,  1.566698904, -1.566698904};
+        arm::app::Array2d<float> norm1(2, 5);
+        populateArray2dWithVectorOfVector(norm1vec, norm1);
+        checker(norm1, goldenNorm1);
+    }
+}
diff --git a/tests/use_case/asr/AsrTests.cc b/tests/use_case/asr/AsrTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/asr/AsrTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
new file mode 100644
index 0000000..1fa4092
--- /dev/null
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+#include "TestData_asr.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
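+    /* std::uniform_int_distribution does not support 8-bit types, so generate shorts in the int8_t range and narrow. */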
+    std::uniform_int_distribution<short> dist {-128, 127};
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+    return true;
+}
+
+/* Skip this test by default: a Wav2LetterModel that has not been Vela-optimised (i.e. taken directly from the ML Model Zoo) will fail. */
+TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    arm::app::Wav2LetterModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    /* Iterate over elements, not bytes, so the loop stays correct for wider types. */
+    for (size_t i = 0; i < outputTensor->bytes / sizeof(T); i++) {
+        REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        auto input_goldenFV = get_ifm_data_array(i);
+        auto output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init")
+        {
+            arm::app::Wav2LetterModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/asr/MfccTests.cc b/tests/use_case/asr/MfccTests.cc
new file mode 100644
index 0000000..c70e53e
--- /dev/null
+++ b/tests/use_case/asr/MfccTests.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 512 samples from itellyou.wav. */
+const std::vector<int16_t> testWav1 = std::vector<int16_t> {
+    -3,0,1,-1,2,3,-2,2,
+    1,-2,0,3,-1,8,3,2,
+    -1,-1,2,7,3,5,6,6,
+    6,12,5,6,3,3,5,4,
+    4,6,7,7,7,3,7,2,
+    8,4,4,2,-4,-1,-1,-4,
+    2,1,-1,-4,0,-7,-6,-2,
+    -5,1,-5,-1,-7,-3,-3,-7,
+    0,-3,3,-5,0,1,-2,-2,
+    -3,-3,-7,-3,-2,-6,-5,-8,
+    -2,-8,4,-9,-4,-9,-5,-5,
+    -3,-9,-3,-9,-1,-7,-4,1,
+    -3,2,-8,-4,-4,-5,1,-3,
+    -1,0,-1,-2,-3,-2,-4,-1,
+    1,-1,3,0,3,2,0,0,
+    0,-3,1,1,0,8,3,4,
+    1,5,6,4,7,3,3,0,
+    3,6,7,6,4,5,9,9,
+    5,5,8,1,6,9,6,6,
+    7,1,8,1,5,0,5,5,
+    0,3,2,7,2,-3,3,0,
+    3,0,0,0,2,0,-1,-1,
+    -2,-3,-8,0,1,0,-3,-3,
+    -3,-2,-3,-3,-4,-6,-2,-8,
+    -9,-4,-1,-5,-3,-3,-4,-3,
+    -6,3,0,-1,-2,-9,-4,-2,
+    2,-1,3,-5,-5,-2,0,-2,
+    0,-1,-3,1,-2,9,4,5,
+    2,2,1,0,-6,-2,0,0,
+    0,-1,4,-4,3,-7,-1,5,
+    -6,-1,-5,4,3,9,-2,1,
+    3,0,0,-2,1,2,1,1,
+    0,3,2,-1,3,-3,7,0,
+    0,3,2,2,-2,3,-2,2,
+    -3,4,-1,-1,-5,-1,-3,-2,
+    1,-1,3,2,4,1,2,-2,
+    0,2,7,0,8,-3,6,-3,
+    6,1,2,-3,-1,-1,-1,1,
+    -2,2,1,2,0,-2,3,-2,
+    3,-2,1,0,-3,-1,-2,-4,
+    -6,-5,-8,-1,-4,0,-3,-1,
+    -1,-1,0,-2,-3,-7,-1,0,
+    1,5,0,5,1,1,-3,0,
+    -6,3,-8,4,-8,6,-6,1,
+    -6,-2,-5,-6,0,-5,4,-1,
+    4,-2,1,2,1,0,-2,0,
+    0,2,-2,2,-5,2,0,-2,
+    1,-2,0,5,1,0,1,5,
+    0,8,3,2,2,0,5,-2,
+    3,1,0,1,0,-2,-1,-3,
+    1,-1,3,0,3,0,-2,-1,
+    -4,-4,-4,-1,-4,-4,-3,-6,
+    -3,-7,-3,-1,-2,0,-5,-4,
+    -7,-3,-2,-2,1,2,2,8,
+    5,4,2,4,3,5,0,3,
+    3,6,4,2,2,-2,4,-2,
+    3,3,2,1,1,4,-5,2,
+    -3,0,-1,1,-2,2,5,1,
+    4,2,3,1,-1,1,0,6,
+    0,-2,-1,1,-1,2,-5,-1,
+    -5,-1,-6,-3,-3,2,4,0,
+    -1,-5,3,-4,-1,-3,-4,1,
+    -4,1,-1,-1,0,-5,-4,-2,
+    -1,-1,-3,-7,-3,-3,4,4,
+};
+
+const std::vector<int16_t> testWav2 = std::vector<int16_t> (512, 0);
+
+/* Golden mfcc output for testwav1. */
+const std::vector<float> golden_mfcc_output_testWav1 {
+    -835.24603, 21.010452, 18.699404, 7.4338417, 19.028961, -5.401735, 6.4761047, -11.400679,
+    8.392709, 12.202361, 8.403276, -13.508412, -18.307348
+};
+
+/* Golden mfcc output for the all zero wav. */
+const std::vector<float> golden_mfcc_output_testWav2 {
+    -1131.37085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+arm::app::audio::Wav2LetterMFCC GetMFCCInstance()
+{
+    const auto sampFreq = arm::app::audio::Wav2LetterMFCC::ms_defaultSamplingFreq;
+    const auto frameLenMs = 32;
+    const auto numMfccFeats = 13;
+    const auto frameLenSamples = sampFreq * frameLenMs * 0.001;
+    return arm::app::audio::Wav2LetterMFCC(numMfccFeats, frameLenSamples);
+}
+
+template <class T>
+void TestQuantisedMFCC()
+{
+    const auto quantScale = 0.1410219967365265;
+    const auto quantOffset = 11;
+    std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav1, quantScale, quantOffset);
+
+    long min_val = std::numeric_limits<T>::min();
+    long max_val = std::numeric_limits<T>::max();
+
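+    /* Quantise the golden float MFCCs and clamp them to the type's numeric range before comparing. */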
+    for (size_t i = 0; i < golden_mfcc_output_testWav1.size(); i++){
+        long TestWavMfcc = (std::lround((golden_mfcc_output_testWav1[i] / quantScale) + quantOffset));
+        T quantizedTestWavMfcc = static_cast<T>(std::max(min_val, std::min(TestWavMfcc, max_val)));
+
+        REQUIRE(quantizedTestWavMfcc  == Approx(mfccOutput[i]).margin(2));
+    }
+}
+
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
+TEST_CASE("MFCC calculation")
+{
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    SECTION("FP32")
+    {
+        auto mfccOutput = GetMFCCInstance().MfccCompute(testWav1);
+        REQUIRE_THAT( mfccOutput, Catch::Approx( golden_mfcc_output_testWav1 ).margin(0.3) );
+
+        auto mfccOutput2 = GetMFCCInstance().MfccCompute(testWav2);
+        REQUIRE_THAT( mfccOutput2, Catch::Approx( golden_mfcc_output_testWav2 ).margin(0.001) );
+    }
+
+    SECTION("int8_t")
+    {
+        TestQuantisedMFCC<int8_t>();
+    }
+
+    SECTION("uint8_t")
+    {
+        TestQuantisedMFCC<uint8_t>();
+    }
+
+    SECTION("int16_t")
+    {
+        TestQuantisedMFCC<int16_t>();
+    }
+}
diff --git a/tests/use_case/asr/OutputDecodeTests.cc b/tests/use_case/asr/OutputDecodeTests.cc
new file mode 100644
index 0000000..22153f3
--- /dev/null
+++ b/tests/use_case/asr/OutputDecodeTests.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "OutputDecode.hpp"
+
+#include "catch.hpp"
+
+TEST_CASE("Running output decode on test vector") {
+
+    std::vector<arm::app::ClassificationResult> vecResult(20);
+    /* Number of test inputs. */
+    const size_t numStrings = 8; 
+    
+    /* The test inputs. */
+    std::string testText[numStrings][20] 
+    {
+        {"a", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "\'", "l"},  /* initial */
+        {" ", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "\'", " "},  /* space start and end */
+        {"\'", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "l", "\'"}, /* apostrophe start and end */
+        {"a", "a", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "l", "l"},   /* Double start and end */
+        {"a", "b", "c", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "\'", "\'", "l"},  /* Legit double character */
+        {"a", "$", "a", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "$", "l"},    /* Legit double character start and end */
+        {"$", "a", "b", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "$", "$"},    /* $$ */
+        {"$", "a", "b", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "l", "l"}
+    };
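+
+    /* DecodeOutput collapses repeated characters; the '$' symbol acts as a separator, so e.g. "o$o" decodes to "oo". */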
+
+    /* The golden outputs for the above test inputs. */
+    std::string expectedOutput[numStrings] =
+    {
+        {"abcdefg hi jk\'l"},
+        {" bcdefg hi jk\' "},
+        {"\'bcdefg hi jk\'l\'"},
+        {"acdefg hi jk\'l"},
+        {"abcdefgoohi jk\'l"},
+        {"aadefgoohi jkll"},
+        {"abdefgoohi jkl"},
+        {"abdefgoohi jkl"}
+    };
+
+    /* For each test input. */
+    for (size_t h = 0; h < numStrings; ++h)
+    {
+        /* Generate fake vecResults.m_label to mimic AsrClassifier output containing the testText. */
+        for (size_t i = 0; i < 20; i++)
+        {
+            vecResult[i].m_label = testText[h][i];
+        }
+        /* Call function with fake vecResults and save returned string into 'buff'. */
+        std::string buff = arm::app::audio::asr::DecodeOutput(vecResult); 
+
+        /* Check that the string returned from the function matches the expected output given above. */
+        REQUIRE(buff.compare(expectedOutput[h]) == 0); 
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
new file mode 100644
index 0000000..9ed2e1b
--- /dev/null
+++ b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+template <typename T>
+static TfLiteTensor GetTestTensor(
+                        std::vector <int>&      shape,
+                        T                       initVal,
+                        std::vector<T>&         vectorBuf)
+{
+    REQUIRE(0 != shape.size());
+
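+    /* IntArrayFromInts expects the first element to be the number of dimensions, so prepend the rank. */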
+    shape.insert(shape.begin(), shape.size());
+    uint32_t numElements = 1;
+    for (size_t i = 1; i < shape.size(); ++i) {
+        numElements *= shape[i];
+    }
+
+    /* Allocate one element of T per tensor element. */
+    vectorBuf = std::vector<T>(numElements, initVal);
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(shape.data());
+    return tflite::testing::CreateQuantizedTensor(
+                                vectorBuf.data(), dims,
+                                1, 0, "test-tensor");
+}
+
+TEST_CASE("Checking return value")
+{
+    SECTION("Mismatched post processing parameters and tensor size")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 1, 13};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+        REQUIRE(false == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+
+    SECTION("Post processing succeeds")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 13, 1};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This call should succeed. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+}
+
+
+TEST_CASE("Postprocessing - erasing required elements")
+{
+    constexpr uint32_t ctxLen = 5;
+    constexpr uint32_t innerLen = 3;
+    constexpr uint32_t nRows = 2*ctxLen + innerLen;
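+    /* Each time window is laid out as ctxLen rows of left context, innerLen inner rows and ctxLen rows of right context. */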
+    constexpr uint32_t nCols = 10;
+    constexpr uint32_t blankTokenIdx = nCols - 1;
+    std::vector <int> tensorShape = {1, 1, nRows, nCols};
+
+    SECTION("First and last iteration")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should not erase anything. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+        REQUIRE(originalVec == tensorVec);
+    }
+
+    SECTION("Right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase the right context only. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+        REQUIRE(originalVec != tensorVec);
+
+        /* The last ctxLen * nCols elements should have been erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Check right context elements are zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+                }
+
+                /* Check left context is preserved. */
+                CHECK(tensorVec[i*nCols + j] == originalVec[i*nCols + j]);
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Left and right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase the right context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        /* Calling it the second time should erase the left context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        REQUIRE(originalVec != tensorVec);
+
+        /* The first and last ctxLen * nCols elements should have been erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Check left and right context elements are zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+                    CHECK(tensorVec[i*nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+                    CHECK(tensorVec[i*nCols + j] == 0);
+                }
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            /* Check left context is preserved. */
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Try left context erase")
+    {
+        /* Should not be able to erase the left context if it is the first iteration. */
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+    /* With the first-window flag set, nothing should be erased. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+
+        REQUIRE(originalVec == tensorVec);
+    }
+}
diff --git a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
new file mode 100644
index 0000000..1391011
--- /dev/null
+++ b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include <limits>
+#include <algorithm>
+#include <catch.hpp>
+
+constexpr uint32_t numMfccFeatures = 13;
+constexpr uint32_t numMfccVectors  = 10;
+
+/* Test vector output: generated using test-asr-preprocessing.py. */
+int8_t expectedResult[numMfccVectors][numMfccFeatures * 3] = {
+    /* Feature vec 0. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,    /* MFCCs.   */
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,    /* Delta 1. */
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,    /* Delta 2. */
+
+    /* Feature vec 1. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 3. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 4 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -38, -29,  -9,   1,  -2,  -7,  -8,  -8, -12, -16, -14,  -5,   5,
+    -68, -50, -13,   5,   0,  -9,  -9,  -8, -13, -20, -19,  -3,  15,
+
+    /* Feature vec 5 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -62, -45, -11,   5,   0,  -8,  -9,  -8, -12, -19, -17,  -3,  13,
+    -27, -22, -13,  -9, -11, -12, -12, -11, -11, -13, -13, -10,  -6,
+
+    /* Feature vec 6. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 7. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 8. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 9. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
+};
+
+void PopulateTestWavVector(std::vector<int16_t>& vec)
+{
+    constexpr int int16max = std::numeric_limits<int16_t>::max();
+    int val = 0;
+    for (size_t i = 0; i < vec.size(); ++i, ++val) {
+
+        /* We want a non-trivial response from both the order 1 and
+         * order 2 differential filters, so the signal must not be
+         * linear; a sequence of squares is used here. Alternating
+         * sign flips would work just as well and would be
+         * computationally cheaper! */
+        int valsq = val * val;
+        if (valsq > int16max) {
+            val = 0;
+            valsq = 0;
+        }
+        vec[i] = valsq;
+    }
+}
+
+TEST_CASE("Preprocessing calculation INT8")
+{
+    /* Initialise the HAL and platform. */
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Constants. */
+    const uint32_t  windowLen       = 512;
+    const uint32_t  windowStride    = 160;
+    const int       dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
+    const float     quantScale      = 0.1410219967365265;
+    const int       quantOffset     = -11;
+
+    /* Test wav memory. */
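+    /* Enough samples for numMfccVectors windows of length windowLen at a hop of windowStride. */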
+    std::vector <int16_t> testWav((windowStride * numMfccVectors) +
+                                  (windowLen - windowStride));
+
+    /* Populate with dummy input. */
+    PopulateTestWavVector(testWav);
+
+    /* Allocate mem for tensor. */
+    std::vector<int8_t> tensorVec(dimArray[1]*dimArray[2]*dimArray[3]);
+
+    /* Initialise dimensions and the test tensor. */
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
+        tensorVec.data(), dims, quantScale, quantOffset, "preprocessedInput");
+
+    /* Initialise pre-processing module. */
+    arm::app::audio::asr::Preprocess prep{
+        numMfccFeatures, windowLen, windowStride, numMfccVectors};
+
+    /* Invoke pre-processing. */
+    REQUIRE(prep.Invoke(testWav.data(), testWav.size(), &tensor));
+
+    /* Wrap the tensor with a std::vector for ease. */
+    int8_t * tensorData = tflite::GetTensorData<int8_t>(&tensor);
+    std::vector <int8_t> vecResults =
+        std::vector<int8_t>(tensorData, tensorData + tensor.bytes);
+
+    /* Check sizes. */
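+    /* sizeof(expectedResult) equals the element count because each element is a single byte. */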
+    REQUIRE(vecResults.size() == sizeof(expectedResult));
+
+    /* Check that the elements have been calculated correctly. */
+    for (uint32_t j = 0; j < numMfccVectors; ++j) {
+        for (uint32_t i = 0; i < numMfccFeatures * 3; ++i) {
+            size_t tensorIdx = (j * numMfccFeatures * 3) + i;
+            CHECK(vecResults[tensorIdx] == expectedResult[j][i]);
+        }
+    }
+}