Open-source ML embedded evaluation kit

Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
diff --git a/tests/use_case/kws_asr/InferenceTestDSCNN.cc b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
new file mode 100644
index 0000000..f0e5c02
--- /dev/null
+++ b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "hal.h"
+#include "TestData_kws.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <cstring>
+#include <random>
+
+namespace arm {
+namespace app {
+namespace kws {
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
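+    /* Copy no more data than the input tensor can hold, and no more than is available. */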
+    const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+                            inputTensor->bytes :
+                            IFM_DATA_SIZE;
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
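+    /* Fill the input tensor with random values spanning the full int8 range. */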
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist {-128, 127};
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data()));
+    return true;
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    REQUIRE(RunInference(model, input_goldenFV));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
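+    /* Every output element must match the golden reference exactly. */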
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running random inference with Tflu and DsCnnModel Int8", "[DS_CNN]")
+{
+    arm::app::DsCnnModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+TEST_CASE("Running inference with Tflu and DsCnnModel Uint8", "[DS_CNN]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        const int8_t* input_goldenFV = get_ifm_data_array(i);
+        const int8_t* output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init, golden input #" << i)
+        {
+            arm::app::DsCnnModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
+
+} /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
new file mode 100644
index 0000000..ee63c2f
--- /dev/null
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+#include "TestData_asr.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <cstring>
+#include <random>
+
+namespace arm {
+namespace app {
+namespace asr {
+
+bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
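+    /* Generate random int8 input covering the full quantised range. */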
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist {-128, 127};
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+    return true;
+}
+
+/* This test is hidden: the Wav2LetterModel only runs when it has been Vela optimised;
+ * the unoptimised version from the ML-zoo will fail on the native pipeline. */
+TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    arm::app::Wav2LetterModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
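+    /* Compare each output element against the golden reference. */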
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        auto input_goldenFV = get_ifm_data_array(i);
+        auto output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init, golden input #" << i)
+        {
+            arm::app::Wav2LetterModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
+
+} /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
diff --git a/tests/use_case/kws_asr/InitModels.cc b/tests/use_case/kws_asr/InitModels.cc
new file mode 100644
index 0000000..770944d
--- /dev/null
+++ b/tests/use_case/kws_asr/InitModels.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <catch.hpp>
+
+/* This test is hidden: it was intended to also initialise the Wav2LetterModel, which only
+ * runs when Vela optimised; the unoptimised ML-zoo version fails on the native pipeline. */
+TEST_CASE("Init two Models", "[.]")
+{
+    arm::app::DsCnnModel model1;
+    arm::app::DsCnnModel model2;
+
+    /* Ideally we should load the wav2letter model here, but there is
+     * none available to run on native (ops not supported on unoptimised
+     * version). However, we can certainly create two instances of the
+     * same type of model to see if our tensor arena re-use works as
+     * intended.
+     *
+     * @TODO: uncomment this when this model can run on native pipeline. */
+    //arm::app::Wav2LetterModel model2;     /* model2. */
+
+    /* Load/initialise the first model. */
+    REQUIRE(model1.Init());
+
+    /* Allocator instance should have been created. */
+    REQUIRE(nullptr != model1.GetAllocator());
+
+    /* Load the second model using the same allocator as model 1. */
+    REQUIRE(model2.Init(model1.GetAllocator()));
+
+    /* Make sure they point to the same allocator object. */
+    REQUIRE(model1.GetAllocator() == model2.GetAllocator());
+
+    /* Both models should report being initialised. */
+    REQUIRE(true == model1.IsInited());
+    REQUIRE(true == model2.IsInited());
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/KwsAsrTests.cc b/tests/use_case/kws_asr/KwsAsrTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/kws_asr/KwsAsrTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/kws_asr/MfccTests.cc b/tests/use_case/kws_asr/MfccTests.cc
new file mode 100644
index 0000000..9509519
--- /dev/null
+++ b/tests/use_case/kws_asr/MfccTests.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 640 samples from yes.wav. */
+const std::vector<int16_t> testWav = std::vector<int16_t>{
+    139, 143, 164, 163, 157, 156, 151, 148, 172, 171,
+    165, 169, 149, 142, 145, 147, 166, 146, 112, 132,
+    132, 136, 165, 176, 176, 152, 138, 158, 179, 185,
+    183, 148, 121, 130, 167, 204, 163, 132, 165, 184,
+    193, 205, 210, 204, 195, 178, 168, 197, 207, 201,
+    197, 177, 185, 196, 191, 198, 196, 183, 193, 181,
+    157, 170, 167, 159, 164, 152, 146, 167, 180, 171,
+    194, 232, 204, 173, 171, 172, 184, 169, 175, 199,
+    200, 195, 185, 214, 214, 193, 196, 191, 204, 191,
+    172, 187, 183, 192, 203, 172, 182, 228, 232, 205,
+    177, 174, 191, 210, 210, 211, 197, 177, 198, 217,
+    233, 236, 203, 191, 169, 145, 149, 161, 198, 206,
+    176, 137, 142, 181, 200, 215, 201, 188, 166, 162,
+    184, 155, 135, 132, 126, 142, 169, 184, 172, 156,
+    132, 119, 150, 147, 154, 160, 125, 130, 137, 154,
+    161, 168, 195, 182, 160, 134, 138, 146, 130, 120,
+    101, 122, 137, 118, 117, 131, 145, 140, 146, 148,
+    148, 168, 159, 134, 114, 114, 130, 147, 147, 134,
+    125, 98, 107, 127, 99, 79, 84, 107, 117, 114,
+    93, 92, 127, 112, 109, 110, 96, 118, 97, 87,
+    110, 95, 128, 153, 147, 165, 146, 106, 101, 137,
+    139, 96, 73, 90, 91, 51, 69, 102, 100, 103,
+    96, 101, 123, 107, 82, 89, 118, 127, 99, 100,
+    111, 97, 111, 123, 106, 121, 133, 103, 100, 88,
+    85, 111, 114, 125, 102, 91, 97, 84, 139, 157,
+    109, 66, 72, 129, 111, 90, 127, 126, 101, 109,
+    142, 138, 129, 159, 140, 80, 74, 78, 76, 98,
+    68, 42, 106, 143, 112, 102, 115, 114, 82, 75,
+    92, 80, 110, 114, 66, 86, 119, 101, 101, 103,
+    118, 145, 85, 40, 62, 88, 95, 87, 73, 64,
+    86, 71, 71, 105, 80, 73, 96, 92, 85, 90,
+    81, 86, 105, 100, 89, 78, 102, 114, 95, 98,
+    69, 70, 108, 112, 111, 90, 104, 137, 143, 160,
+    145, 121, 98, 86, 91, 87, 115, 123, 109, 99,
+    85, 120, 131, 116, 125, 144, 153, 111, 98, 110,
+    93, 89, 101, 137, 155, 142, 108, 94, 136, 145,
+    129, 129, 122, 109, 90, 76, 81, 110, 119, 96,
+    95, 102, 105, 111, 90, 89, 111, 115, 86, 51,
+    107, 140, 105, 105, 110, 142, 125, 76, 75, 69,
+    65, 52, 61, 69, 55, 42, 47, 58, 37, 35,
+    24, 20, 44, 22, 16, 26, 6, 3, 4, 23,
+    60, 51, 30, 12, 24, 31, -9, -16, -13, 13,
+    19, 9, 37, 55, 70, 36, 23, 57, 45, 33,
+    50, 59, 18, 11, 62, 74, 52, 8, -3, 26,
+    51, 48, -5, -9, 12, -7, -12, -5, 28, 41,
+    -2, -30, -13, 31, 33, -12, -22, -8, -15, -17,
+    2, -6, -25, -27, -24, -8, 4, -9, -52, -47,
+    -9, -32, -45, -5, 41, 15, -32, -14, 2, -1,
+    -10, -30, -32, -25, -21, -17, -14, 8, -4, -13,
+    34, 18, -36, -38, -18, -19, -28, -17, -14, -16,
+    -2, -20, -27, 12, 11, -17, -33, -12, -22, -64,
+    -42, -26, -23, -22, -37, -51, -53, -30, -18, -48,
+    -69, -38, -54, -96, -72, -49, -50, -57, -41, -22,
+    -43, -64, -54, -23, -49, -69, -41, -44, -42, -49,
+    -40, -26, -54, -50, -38, -49, -70, -94, -89, -69,
+    -56, -65, -71, -47, -39, -49, -79, -91, -56, -46,
+    -62, -86, -64, -32, -47, -50, -71, -77, -65, -68,
+    -52, -51, -61, -67, -61, -81, -93, -52, -59, -62,
+    -51, -75, -76, -50, -32, -54, -68, -70, -43, 1,
+    -42, -92, -80, -41, -38, -79, -69, -49, -82, -122,
+    -93, -21, -24, -61, -70, -73, -62, -74, -69, -43,
+    -25, -15, -43, -23, -26, -69, -44, -12, 1, -51,
+    -78, -13, 3, -53, -105, -72, -24, -62, -66, -31,
+    -40, -65, -86, -64, -44, -55, -63, -61, -37, -41,
+};
+
+/* Golden audio ops mfcc output for the above wav. */
+const std::vector<float> testWavMfcc {
+    -22.67135, -0.61615, 2.07233, 0.58137, 1.01655, 0.85816, 0.46039, 0.03393, 1.16511, 0.0072,
+};
+
+arm::app::audio::DsCnnMFCC GetMFCCInstance() {
+    const int sampFreq = arm::app::audio::DsCnnMFCC::ms_defaultSamplingFreq;
+    const int frameLenMs = 40;
+    const int frameLenSamples = sampFreq * frameLenMs * 0.001;
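+    /* With the default 16 kHz sampling rate this is 640 samples - the length of testWav above. */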
+    const int numMfccFeats = 10;
+
+    return arm::app::audio::DsCnnMFCC(numMfccFeats, frameLenSamples);
+}
+
+template <class T>
+void TestQuantisedMFCC() {
+    const float quantScale = 1.1088106632232666;
+    const int quantOffset = 95;
+    std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav, quantScale, quantOffset);
+
+    const long min_val = std::numeric_limits<T>::min();
+    const long max_val = std::numeric_limits<T>::max();
+
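+    /* Quantise each golden MFCC value as q = round(f / scale) + offset, saturated to T's range. */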
+    for (size_t i = 0; i < testWavMfcc.size(); ++i){
+        long quantizedGolden = std::lround((testWavMfcc[i] / quantScale) + quantOffset);
+        T expected = static_cast<T>(std::max(min_val, std::min(quantizedGolden, max_val)));
+
+        REQUIRE(expected == Approx(mfccOutput[i]).margin(0));
+    }
+}
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
+TEST_CASE("MFCC calculation test")
+{
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    SECTION("FP32")
+    {
+        auto mfccOutput = GetMFCCInstance().MfccCompute(testWav);
+        REQUIRE_THAT( mfccOutput, Catch::Approx( testWavMfcc ).margin(0.0001) );
+    }
+
+    SECTION("int8_t")
+    {
+        TestQuantisedMFCC<int8_t>();
+    }
+
+    SECTION("uint8_t")
+    {
+        TestQuantisedMFCC<uint8_t>();
+    }
+
+    SECTION("MFCC quant calculation test - int16_t")
+    {
+        TestQuantisedMFCC<int16_t>();
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
new file mode 100644
index 0000000..6fd7df3
--- /dev/null
+++ b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+template <typename T>
+static TfLiteTensor GetTestTensor(std::vector <int>& shape,
+                                  T                  initVal,
+                                  std::vector<T>&    vectorBuf)
+{
+    REQUIRE(0 != shape.size());
+
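+    /* Prepend the number of dimensions: IntArrayFromInts expects it as the first element. */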
+    shape.insert(shape.begin(), shape.size());
+    uint32_t sizeInBytes = sizeof(T);
+    for (size_t i = 1; i < shape.size(); ++i) {
+        sizeInBytes *= shape[i];
+    }
+
+    /* Allocate mem. */
+    vectorBuf = std::vector<T>(sizeInBytes, initVal);
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(shape.data());
+    return tflite::testing::CreateQuantizedTensor(
+                                vectorBuf.data(), dims,
+                                1, 0, "test-tensor");
+}
+
+TEST_CASE("Checking return value")
+{
+    SECTION("Mismatched post processing parameters and tensor size")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 1, 13};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+        REQUIRE(false == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+
+    SECTION("Post processing succeeds")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 13, 1};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* With a valid output shape the call must succeed; only the return value is checked here. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+}
+
+TEST_CASE("Postprocessing - erasing required elements")
+{
+    constexpr uint32_t ctxLen = 5;
+    constexpr uint32_t innerLen = 3;
+    constexpr uint32_t nRows = 2*ctxLen + innerLen;
+    constexpr uint32_t nCols = 10;
+    constexpr uint32_t blankTokenIdx = nCols - 1;
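+    /* Rows are laid out as ctxLen left context, innerLen inner, ctxLen right context.
+     * An erased row is expected to be zeroed except for the blank-token column, which is set to 1. */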
+    std::vector <int> tensorShape = {1, 1, nRows, nCols};
+
+    SECTION("First and last iteration")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should not erase anything. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+        REQUIRE(originalVec == tensorVec);
+    }
+
+    SECTION("Right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase the right context only. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+        REQUIRE(originalVec != tensorVec);
+
+        /* The last ctxLen * nCols elements should have been erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Check right context elements are zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+                }
+
+                /* Check left context is preserved. */
+                CHECK(tensorVec[i*nCols + j] == originalVec[i*nCols + j]);
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Left and right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase right context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        /* Calling it the second time should erase the left context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        REQUIRE(originalVec != tensorVec);
+
+        /* The first and last ctxLen * nCols elements should have been erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Check left and right context elements are zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i * nCols + j] == 1);
+                    CHECK(tensorVec[i * nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i * nCols + j] == 0);
+                    CHECK(tensorVec[i * nCols + j] == 0);
+                }
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Try left context erase")
+    {
+        /* Should not be able to erase the left context if it is the first iteration. */
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This first call should leave the tensor untouched. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+        REQUIRE(originalVec == tensorVec);
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
new file mode 100644
index 0000000..e71366a
--- /dev/null
+++ b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+constexpr uint32_t numMfccFeatures = 13;
+constexpr uint32_t numMfccVectors  = 10;
+
+/* Test vector output: generated using test-asr-preprocessing.py. */
+int8_t expectedResult[numMfccVectors][numMfccFeatures*3] = {
+    /* Feature vec 0. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,    /* MFCCs.   */
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,    /* Delta 1. */
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,    /* Delta 2. */
+
+    /* Feature vec 1. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 3. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 4 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -38, -29,  -9,   1,  -2,  -7,  -8,  -8, -12, -16, -14,  -5,   5,
+    -68, -50, -13,   5,   0,  -9,  -9,  -8, -13, -20, -19,  -3,  15,
+
+    /* Feature vec 5 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -62, -45, -11,   5,   0,  -8,  -9,  -8, -12, -19, -17,  -3,  13,
+    -27, -22, -13,  -9, -11, -12, -12, -11, -11, -13, -13, -10,  -6,
+
+    /* Feature vec 6. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 7. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 8. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 9. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
+};
+
+void PopulateTestWavVector(std::vector<int16_t>& vec)
+{
+    constexpr int int16max = std::numeric_limits<int16_t>::max();
+    int val = 0;
+    for (size_t i = 0; i < vec.size(); ++i, ++val) {
+
+        /* We want a differential filter response from both - order 1
+         * and 2 => Don't have a linear signal here - we use a signal
+         * using squares for example. Alternate sign flips might work
+         * just as well and will be computationally less work! */
+        int valsq = val * val;
+        if (valsq > int16max) {
+            val = 0;
+            valsq = 0;
+        }
+        vec[i] = valsq;
+    }
+}
+
+TEST_CASE("Preprocessing calculation INT8")
+{
+    /* Initialise the HAL and platform. */
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Constants. */
+    const uint32_t  windowLen       = 512;
+    const uint32_t  windowStride    = 160;
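+    /* dimArray: the leading 3 is the dimension count expected by IntArrayFromInts;
+     * the tensor shape itself is {1, numMfccFeatures * 3, numMfccVectors}. */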
+    const int       dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
+    const float     quantScale      = 0.1410219967365265;
+    const int       quantOffset     = -11;
+
+    /* Test wav memory. */
+    std::vector <int16_t> testWav((windowStride * numMfccVectors) +
+                                  (windowLen - windowStride));
+
+    /* Populate with dummy input. */
+    PopulateTestWavVector(testWav);
+
+    /* Allocate mem for tensor. */
+    std::vector<int8_t> tensorVec(dimArray[1]*dimArray[2]*dimArray[3]);
+
+    /* Initialise dimensions and the test tensor. */
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
+        tensorVec.data(), dims, quantScale, quantOffset, "preprocessedInput");
+
+    /* Initialise pre-processing module. */
+    arm::app::audio::asr::Preprocess prep{
+        numMfccFeatures, windowLen, windowStride, numMfccVectors};
+
+    /* Invoke pre-processing. */
+    REQUIRE(prep.Invoke(testWav.data(), testWav.size(), &tensor));
+
+    /* Wrap the tensor with a std::vector for ease. */
+    int8_t * tensorData = tflite::GetTensorData<int8_t>(&tensor);
+    std::vector <int8_t> vecResults =
+        std::vector<int8_t>(tensorData, tensorData + tensor.bytes);
+
+    /* Check sizes. */
+    REQUIRE(vecResults.size() == sizeof(expectedResult));
+
+    /* Check that the elements have been calculated correctly. */
+    for (uint32_t j = 0; j < numMfccVectors; ++j) {
+        for (uint32_t i = 0; i < numMfccFeatures * 3; ++i) {
+            size_t tensorIdx = (j * numMfccFeatures * 3) + i;
+            CHECK(vecResults[tensorIdx] == expectedResult[j][i]);
+        }
+    }
+}