MLECO-1252 ASR sample application using the public ArmNN C++ API.

Adds a standalone speech recognition sample that loads an audio file, resamples
it to 16kHz mono, extracts MFCC features together with their first and second
order deltas, runs a quantised Wav2Letter model through the public ArmNN C++
API, and decodes the network output into text.

Change-Id: I98cd505b8772a8c8fa88308121bc94135bb45068
Signed-off-by: Éanna Ó Catháin <eanna.ocathain@arm.com>
diff --git a/samples/SpeechRecognition/src/AudioCapture.cpp b/samples/SpeechRecognition/src/AudioCapture.cpp
new file mode 100644
index 0000000..f3b9092
--- /dev/null
+++ b/samples/SpeechRecognition/src/AudioCapture.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "AudioCapture.hpp"
+#include <alsa/asoundlib.h>
+#include <sndfile.h>
+#include <samplerate.h>
+
+#include <cmath>
+#include <stdexcept>
+
+namespace asr
+{
+    std::vector<float> AudioCapture::LoadAudioFile(std::string filePath)
+    {
+        SF_INFO inputSoundFileInfo;
+        SNDFILE* infile = sf_open(filePath.c_str(), SFM_READ, &inputSoundFileInfo);
+        if (infile == nullptr)
+        {
+            throw std::runtime_error("Failed to open audio file: " + filePath);
+        }
+
+        std::vector<float> audioIn(inputSoundFileInfo.channels * inputSoundFileInfo.frames);
+        sf_read_float(infile, audioIn.data(), inputSoundFileInfo.channels * inputSoundFileInfo.frames);
+
+        // The model expects 16kHz mono input, so work out the resampling ratio
+        float sampleRate = 16000.0f;
+        float srcRatio = sampleRate / (float)inputSoundFileInfo.samplerate;
+        int outputFrames = ceil(inputSoundFileInfo.frames * srcRatio);
+        std::vector<float> dataOut(outputFrames);
+
+        // Convert to mono by averaging the channels of each frame
+        std::vector<float> monoData(inputSoundFileInfo.frames, 0.0f);
+        for(int i = 0; i < inputSoundFileInfo.frames; i++)
+        {
+            float val = 0.0f;
+            for(int j = 0; j < inputSoundFileInfo.channels; j++)
+            {
+                val += audioIn[i * inputSoundFileInfo.channels + j];
+            }
+            monoData[i] = val / inputSoundFileInfo.channels;
+        }
+
+        // Resample the mono data to the target sample rate
+        SRC_DATA srcData;
+        srcData.data_in = monoData.data();
+        srcData.input_frames = inputSoundFileInfo.frames;
+        srcData.data_out = dataOut.data();
+        srcData.output_frames = outputFrames;
+        srcData.src_ratio = srcRatio;
+
+        if (src_simple(&srcData, SRC_SINC_BEST_QUALITY, 1) != 0)
+        {
+            sf_close(infile);
+            throw std::runtime_error("Failed to resample audio data.");
+        }
+
+        // Convert to Vector
+        std::vector<float> processedInput;
+
+        for(int i = 0; i < srcData.output_frames_gen; ++i)
+        {
+            processedInput.push_back(srcData.data_out[i]);
+        }
+
+        sf_close(infile);
+
+        return processedInput;
+    }
+
+    void AudioCapture::InitSlidingWindow(float* data, size_t dataSize, int minSamples, size_t stride)
+    {
+        this->m_window = SlidingWindow<const float>(data, dataSize, minSamples, stride);
+    }
+
+    bool AudioCapture::HasNext()
+    {
+        return m_window.HasNext();
+    }
+
+    std::vector<float> AudioCapture::Next()
+    {
+        if (this->m_window.HasNext())
+        {
+            int remainingData = this->m_window.RemainingData();
+            const float* windowData = this->m_window.Next();
+
+            size_t windowSize = this->m_window.GetWindowSize();
+
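+            // The final window may hold fewer valid samples than a full window;
+            // zero-pad it so MFCC extraction always receives a full-sized block.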
+            if(remainingData < windowSize)
+            {
+                std::vector<float> mfccAudioData(windowSize, 0.0f);
+                for(int i = 0; i < remainingData; ++i)
+                {
+                    mfccAudioData[i] = *windowData;
+                    if(i < remainingData - 1)
+                    {
+                        ++windowData;
+                    }
+                }
+                return mfccAudioData;
+            }
+            else
+            {
+                std::vector<float> mfccAudioData(windowData,  windowData + windowSize);
+                return mfccAudioData;
+            }
+        }
+        else
+        {
+            throw std::out_of_range("Error, end of audio data reached.");
+        }
+    }
+} //namespace asr
+
diff --git a/samples/SpeechRecognition/src/Decoder.cpp b/samples/SpeechRecognition/src/Decoder.cpp
new file mode 100644
index 0000000..663d4db
--- /dev/null
+++ b/samples/SpeechRecognition/src/Decoder.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Decoder.hpp"
+
+namespace asr {
+
+    Decoder::Decoder(std::map<int, std::string>& labels):
+            m_labels(labels)
+    {}
+
+    std::string Decoder::FilterCharacters(std::vector<char>& unfiltered)
+    {
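+        // Greedy CTC-style collapse: drop the blank symbol '$' and merge consecutive
+        // duplicate characters, e.g. {'h','e','l','l','$','l','o'} becomes "hello".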
+        std::string filtered = "";
+
+        for(size_t i = 0; i < unfiltered.size(); ++i)
+        {
+            if (unfiltered.at(i) == '$')
+            {
+                continue;
+            }
+            else if (i + 1 < unfiltered.size() && unfiltered.at(i) == unfiltered.at(i + 1))
+            {
+                continue;
+            }
+            else
+            {
+                filtered += unfiltered.at(i);
+            }
+        }
+        return filtered;
+    }
+} // namespace asr
+
diff --git a/samples/SpeechRecognition/src/MFCC.cpp b/samples/SpeechRecognition/src/MFCC.cpp
new file mode 100644
index 0000000..234b14d
--- /dev/null
+++ b/samples/SpeechRecognition/src/MFCC.cpp
@@ -0,0 +1,397 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <cstdio>
+#include <float.h>
+
+#include "MFCC.hpp"
+#include "MathUtils.hpp"
+
+
+MfccParams::MfccParams(
+        const float samplingFreq,
+        const int numFbankBins,
+        const float melLoFreq,
+        const float melHiFreq,
+        const int numMfccFeats,
+        const int frameLen,
+        const bool useHtkMethod,
+        const int numMfccVectors):
+        m_samplingFreq(samplingFreq),
+        m_numFbankBins(numFbankBins),
+        m_melLoFreq(melLoFreq),
+        m_melHiFreq(melHiFreq),
+        m_numMfccFeatures(numMfccFeats),
+        m_frameLen(frameLen),
+        m_numMfccVectors(numMfccVectors),
+
+        /* Smallest power of 2 >= frame length. */
+        m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
+        m_useHtkMethod(useHtkMethod)
+{}
+
+std::string MfccParams::Str()
+{
+    char strC[1024];
+    snprintf(strC, sizeof(strC) - 1, "\n   \
+            \n\t Sampling frequency:         %f\
+            \n\t Number of filter banks:     %d\
+            \n\t Mel frequency limit (low):  %f\
+            \n\t Mel frequency limit (high): %f\
+            \n\t Number of MFCC features:    %d\
+            \n\t Frame length:               %d\
+            \n\t Padded frame length:        %d\
+            \n\t Using HTK for Mel scale:    %s\n",
+             this->m_samplingFreq, this->m_numFbankBins, this->m_melLoFreq,
+             this->m_melHiFreq, this->m_numMfccFeatures, this->m_frameLen,
+             this->m_frameLenPadded, this->m_useHtkMethod ? "yes" : "no");
+    return std::string{strC};
+}
+
+MFCC::MFCC(const MfccParams& params):
+        _m_params(params),
+        _m_filterBankInitialised(false)
+{
+    this->_m_buffer = std::vector<float>(
+            this->_m_params.m_frameLenPadded, 0.0);
+    this->_m_frame = std::vector<float>(
+            this->_m_params.m_frameLenPadded, 0.0);
+    this->_m_melEnergies = std::vector<float>(
+            this->_m_params.m_numFbankBins, 0.0);
+
+    this->_m_windowFunc = std::vector<float>(this->_m_params.m_frameLen);
+    const float multiplier = 2 * M_PI / this->_m_params.m_frameLen;
+
+    /* Create Hann window function: w[i] = 0.5 - 0.5 * cos(2 * pi * i / frameLen). */
+    for (size_t i = 0; i < this->_m_params.m_frameLen; i++)
+    {
+        this->_m_windowFunc[i] = (0.5 - (0.5 * cos(static_cast<float>(i) * multiplier)));
+    }
+}
+
+void MFCC::Init()
+{
+    this->_InitMelFilterBank();
+}
+
+float MFCC::MelScale(const float freq, const bool useHTKMethod)
+{
+    if (useHTKMethod)
+    {
+        return 1127.0f * logf (1.0f + freq / 700.0f);
+    }
+    else
+    {
+        /* Slaney formula for mel scale. */
+        float mel = freq / freqStep;
+
+        if (freq >= minLogHz)
+        {
+            mel = minLogMel + logf(freq / minLogHz) / logStep;
+        }
+        return mel;
+    }
+}
+
+float MFCC::InverseMelScale(const float melFreq, const bool useHTKMethod)
+{
+    if (useHTKMethod)
+    {
+        return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
+    }
+    else
+    {
+        /* Slaney formula for mel scale. */
+        float freq = freqStep * melFreq;
+
+        if (melFreq >= minLogMel)
+        {
+            freq = minLogHz * expf(logStep * (melFreq - minLogMel));
+        }
+        return freq;
+    }
+}
+
+
+bool MFCC::ApplyMelFilterBank(
+        std::vector<float>&                 fftVec,
+        std::vector<std::vector<float>>&    melFilterBank,
+        std::vector<int32_t>&               filterBankFilterFirst,
+        std::vector<int32_t>&               filterBankFilterLast,
+        std::vector<float>&                 melEnergies)
+{
+    const size_t numBanks = melEnergies.size();
+
+    if (numBanks != filterBankFilterFirst.size() ||
+        numBanks != filterBankFilterLast.size())
+    {
+        printf("unexpected filter bank lengths\n");
+        return false;
+    }
+
+    for (size_t bin = 0; bin < numBanks; ++bin)
+    {
+        auto filterBankIter = melFilterBank[bin].begin();
+        float melEnergy = 1e-10; /* Avoid log of zero at later stages */
+        const int32_t firstIndex = filterBankFilterFirst[bin];
+        const int32_t lastIndex = filterBankFilterLast[bin];
+
+        for (int32_t i = firstIndex; i <= lastIndex; ++i)
+        {
+            melEnergy += (*filterBankIter++ * fftVec[i]);
+        }
+
+        melEnergies[bin] = melEnergy;
+    }
+
+    return true;
+}
+
+void MFCC::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+{
+    float maxMelEnergy = -FLT_MAX;
+
+    /* Container for natural logarithms of mel energies */
+    std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+    /* Because we are taking natural logs, we need to multiply by log10(e).
+     * Also, for wav2letter model, we scale our log10 values by 10 */
+    constexpr float multiplier = 10.0 * /* default scalar */
+                                 0.4342944819032518; /* log10f(std::exp(1.0))*/
+
+    /* Take log of the whole vector */
+    MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+    /* Scale the log values and get the max */
+    for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+         iterM != melEnergies.end(); ++iterM, ++iterL)
+    {
+        *iterM = *iterL * multiplier;
+
+        /* Save the max mel energy. */
+        if (*iterM > maxMelEnergy)
+        {
+            maxMelEnergy = *iterM;
+        }
+    }
+
+    /* Clamp the mel energies */
+    constexpr float maxDb = 80.0;
+    const float clampLevelLowdB = maxMelEnergy - maxDb;
+    for (auto iter = melEnergies.begin(); iter != melEnergies.end(); ++iter)
+    {
+        *iter = std::max(*iter, clampLevelLowdB);
+    }
+}
+
+void MFCC::_ConvertToPowerSpectrum()
+{
+    const uint32_t halfDim = this->_m_params.m_frameLenPadded / 2;
+
+    /* Handle this special case. */
+    float firstEnergy = this->_m_buffer[0] * this->_m_buffer[0];
+    float lastEnergy = this->_m_buffer[1] * this->_m_buffer[1];
+
+    MathUtils::ComplexMagnitudeSquaredF32(
+            this->_m_buffer.data(),
+            this->_m_buffer.size(),
+            this->_m_buffer.data(),
+            this->_m_buffer.size()/2);
+
+    this->_m_buffer[0] = firstEnergy;
+    this->_m_buffer[halfDim] = lastEnergy;
+}
+
+std::vector<float> MFCC::CreateDCTMatrix(
+        const int32_t inputLength,
+        const int32_t coefficientCount)
+{
+    std::vector<float> dctMatrix(inputLength * coefficientCount);
+
+    /* Orthonormal normalization. */
+    const float normalizerK0 = 2 * sqrt(1.0 / static_cast<float>(4*inputLength));
+    const float normalizer = 2 * sqrt(1.0 / static_cast<float>(2*inputLength));
+
+    const float angleIncr = M_PI/inputLength;
+    float angle = angleIncr; /* we start using it at k = 1 loop */
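+
+    /* The matrix holds the orthonormal DCT-II basis: entry (k, n) is
+     * normalizer * cos(pi / inputLength * (n + 0.5) * k), with normalizerK0
+     * used for the k = 0 row. */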
+
+    /* First row of DCT will use normalizer K0 */
+    for (int32_t n = 0; n < inputLength; ++n)
+    {
+        dctMatrix[n] = normalizerK0;
+    }
+
+    /* Second row (index = 1) onwards, we use standard normalizer */
+    for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength)
+    {
+        for (int32_t n = 0; n < inputLength; ++n)
+        {
+            dctMatrix[m+n] = normalizer * cos((n + 0.5) * angle);
+        }
+        angle += angleIncr;
+    }
+    return dctMatrix;
+}
+
+float MFCC::GetMelFilterBankNormaliser(
+        const float&    leftMel,
+        const float&    rightMel,
+        const bool      useHTKMethod)
+{
+    /* Slaney normalization for mel weights. */
+    return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
+                    MFCC::InverseMelScale(leftMel, useHTKMethod)));
+}
+
+void MFCC::_InitMelFilterBank()
+{
+    if (!this->_IsMelFilterBankInited())
+    {
+        this->_m_melFilterBank = this->_CreateMelFilterBank();
+        this->_m_dctMatrix = this->CreateDCTMatrix(
+                this->_m_params.m_numFbankBins,
+                this->_m_params.m_numMfccFeatures);
+        this->_m_filterBankInitialised = true;
+    }
+}
+
+bool MFCC::_IsMelFilterBankInited()
+{
+    return this->_m_filterBankInitialised;
+}
+
+void MFCC::_MfccComputePreFeature(const std::vector<float>& audioData)
+{
+    this->_InitMelFilterBank();
+
+    /* TensorFlow way of normalizing .wav data to (-1, 1). */
+    constexpr float normaliser = 1.0;
+    for (size_t i = 0; i < this->_m_params.m_frameLen; i++)
+    {
+        this->_m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
+    }
+
+    /* Apply window function to input frame. */
+    for(size_t i = 0; i < this->_m_params.m_frameLen; i++)
+    {
+        this->_m_frame[i] *= this->_m_windowFunc[i];
+    }
+
+    /* Set remaining frame values to 0. */
+    std::fill(this->_m_frame.begin() + this->_m_params.m_frameLen, this->_m_frame.end(), 0.0f);
+
+    /* Compute FFT. */
+    MathUtils::FftF32(this->_m_frame, this->_m_buffer);
+
+    /* Convert to power spectrum. */
+    this->_ConvertToPowerSpectrum();
+
+    /* Apply mel filterbanks. */
+    if (!this->ApplyMelFilterBank(this->_m_buffer,
+                                  this->_m_melFilterBank,
+                                  this->_m_filterBankFilterFirst,
+                                  this->_m_filterBankFilterLast,
+                                  this->_m_melEnergies))
+    {
+        printf("Failed to apply MEL filter banks\n");
+    }
+
+    /* Convert to logarithmic scale */
+    this->ConvertToLogarithmicScale(this->_m_melEnergies);
+}
+
+std::vector<float> MFCC::MfccCompute(const std::vector<float>& audioData)
+{
+    this->_MfccComputePreFeature(audioData);
+
+    std::vector<float> mfccOut(this->_m_params.m_numMfccFeatures);
+
+    float * ptrMel = this->_m_melEnergies.data();
+    float * ptrDct = this->_m_dctMatrix.data();
+    float * ptrMfcc = mfccOut.data();
+
+    /* Take DCT. Uses matrix mul. */
+    for (size_t i = 0, j = 0; i < mfccOut.size();
+         ++i, j += this->_m_params.m_numFbankBins)
+    {
+        *ptrMfcc++ = MathUtils::DotProductF32(
+                ptrDct + j,
+                ptrMel,
+                this->_m_params.m_numFbankBins);
+    }
+
+    return mfccOut;
+}
+
+std::vector<std::vector<float>> MFCC::_CreateMelFilterBank()
+{
+    size_t numFftBins = this->_m_params.m_frameLenPadded / 2;
+    float fftBinWidth = static_cast<float>(this->_m_params.m_samplingFreq) / this->_m_params.m_frameLenPadded;
+
+    float melLowFreq = MFCC::MelScale(this->_m_params.m_melLoFreq,
+                                      this->_m_params.m_useHtkMethod);
+    float melHighFreq = MFCC::MelScale(this->_m_params.m_melHiFreq,
+                                       this->_m_params.m_useHtkMethod);
+    float melFreqDelta = (melHighFreq - melLowFreq) / (this->_m_params.m_numFbankBins + 1);
+
+    std::vector<float> thisBin = std::vector<float>(numFftBins);
+    std::vector<std::vector<float>> melFilterBank(
+            this->_m_params.m_numFbankBins);
+    this->_m_filterBankFilterFirst =
+            std::vector<int32_t>(this->_m_params.m_numFbankBins);
+    this->_m_filterBankFilterLast =
+            std::vector<int32_t>(this->_m_params.m_numFbankBins);
+
+    for (size_t bin = 0; bin < this->_m_params.m_numFbankBins; bin++)
+    {
+        float leftMel = melLowFreq + bin * melFreqDelta;
+        float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
+        float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
+
+        int32_t firstIndex = -1;
+        int32_t lastIndex = -1;
+        const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->_m_params.m_useHtkMethod);
+
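+        /* Each filter is a triangle in mel space: weights rise from zero at leftMel
+         * to one at centerMel and fall back to zero at rightMel, scaled by the
+         * Slaney normaliser computed above. */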
+        for (size_t i = 0; i < numFftBins; i++)
+        {
+            float freq = (fftBinWidth * i); /* Center freq of this fft bin. */
+            float mel = MFCC::MelScale(freq, this->_m_params.m_useHtkMethod);
+            thisBin[i] = 0.0;
+
+            if (mel > leftMel && mel < rightMel)
+            {
+                float weight;
+                if (mel <= centerMel)
+                {
+                    weight = (mel - leftMel) / (centerMel - leftMel);
+                }
+                else
+                {
+                    weight = (rightMel - mel) / (rightMel - centerMel);
+                }
+
+                thisBin[i] = weight * normaliser;
+                if (firstIndex == -1)
+                {
+                    firstIndex = i;
+                }
+                lastIndex = i;
+            }
+        }
+
+        this->_m_filterBankFilterFirst[bin] = firstIndex;
+        this->_m_filterBankFilterLast[bin] = lastIndex;
+
+        /* Copy the part we care about. */
+        for (int32_t i = firstIndex; i <= lastIndex; i++)
+        {
+            melFilterBank[bin].push_back(thisBin[i]);
+        }
+    }
+
+    return melFilterBank;
+}
+
diff --git a/samples/SpeechRecognition/src/Main.cpp b/samples/SpeechRecognition/src/Main.cpp
new file mode 100644
index 0000000..de37e23
--- /dev/null
+++ b/samples/SpeechRecognition/src/Main.cpp
@@ -0,0 +1,157 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <iostream>
+#include <map>
+#include <vector>
+#include <algorithm>
+#include <cmath>
+#include <sstream>
+#include <string>
+
+#include "CmdArgsParser.hpp"
+#include "ArmnnNetworkExecutor.hpp"
+#include "AudioCapture.hpp"
+#include "Preprocess.hpp"
+#include "Decoder.hpp"
+#include "SpeechRecognitionPipeline.hpp"
+
+
+using InferenceResult = std::vector<int8_t>;
+using InferenceResults = std::vector<InferenceResult>;
+
+const std::string AUDIO_FILE_PATH = "--audio-file-path";
+const std::string MODEL_FILE_PATH = "--model-file-path";
+const std::string LABEL_PATH = "--label-path";
+const std::string PREFERRED_BACKENDS = "--preferred-backends";
+const std::string HELP = "--help";
+
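+// Output labels of the Wav2Letter model: indices 0-25 map to letters, 26 is the
+// apostrophe, 27 the word separator and 28 ('$') the blank symbol that the decoder removes.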
+std::map<int, std::string> labels = {
+        {0, "a" },
+        {1, "b" },
+        {2, "c" },
+        {3, "d" },
+        {4, "e" },
+        {5, "f" },
+        {6, "g" },
+        {7, "h" },
+        {8, "i" },
+        {9, "j" },
+        {10,"k" },
+        {11,"l" },
+        {12,"m" },
+        {13,"n" },
+        {14,"o" },
+        {15,"p" },
+        {16,"q" },
+        {17,"r" },
+        {18,"s" },
+        {19,"t" },
+        {20,"u" },
+        {21,"v" },
+        {22,"w" },
+        {23,"x" },
+        {24,"y" },
+        {25,"z" },
+        {26, "\'" },
+        {27, " "},
+        {28,"$" }
+};
+
+/*
+ * The accepted options for this Speech Recognition executable
+ */
+static std::map<std::string, std::string> CMD_OPTIONS = {
+        {AUDIO_FILE_PATH, "[REQUIRED] Path to the Audio file to run speech recognition on"},
+        {MODEL_FILE_PATH, "[REQUIRED] Path to the Speech Recognition model to use"},
+        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
+                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
+                             " Defaults to CpuAcc,CpuRef"}
+};
+
+/*
+ * Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
+ */
+std::vector<armnn::BackendId> GetPreferredBackendList(const std::string& preferredBackends)
+{
+    std::vector<armnn::BackendId> backends;
+    std::stringstream ss(preferredBackends);
+
+    while(ss.good())
+    {
+        std::string backend;
+        std::getline( ss, backend, ',' );
+        backends.emplace_back(backend);
+    }
+    return backends;
+}
+
+int main(int argc, char *argv[])
+{
+    // Wav2Letter ASR SETTINGS
+    int             SAMP_FREQ                  = 16000;
+    int             FRAME_LEN_MS               = 32;
+    int             FRAME_LEN_SAMPLES          = SAMP_FREQ * FRAME_LEN_MS * 0.001;
+    int             NUM_MFCC_FEATS             = 13;
+    int             MFCC_WINDOW_LEN            = 512;
+    int             MFCC_WINDOW_STRIDE         = 160;
+    const int       NUM_MFCC_VECTORS           = 296;
+    int             SAMPLES_PER_INFERENCE      = MFCC_WINDOW_LEN + ((NUM_MFCC_VECTORS -1) * MFCC_WINDOW_STRIDE);
+    int             MEL_LO_FREQ                = 0;
+    int             MEL_HI_FREQ                = 8000;
+    int             NUM_FBANK_BIN              = 128;
+    int             INPUT_WINDOW_LEFT_CONTEXT  = 98;
+    int             INPUT_WINDOW_RIGHT_CONTEXT = 98;
+    int             INPUT_WINDOW_INNER_CONTEXT = NUM_MFCC_VECTORS -
+            (INPUT_WINDOW_LEFT_CONTEXT + INPUT_WINDOW_RIGHT_CONTEXT);
+    int             SLIDING_WINDOW_OFFSET      = INPUT_WINDOW_INNER_CONTEXT * MFCC_WINDOW_STRIDE;
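+    // With these values: FRAME_LEN_SAMPLES = 512, SAMPLES_PER_INFERENCE = 512 + 295 * 160 = 47712
+    // samples (~2.98s at 16kHz) and SLIDING_WINDOW_OFFSET = 100 * 160 = 16000 samples (1s), so
+    // consecutive inference windows overlap, covering the left and right context regions.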
+
+
+    MfccParams mfccParams(SAMP_FREQ, NUM_FBANK_BIN,
+            MEL_LO_FREQ, MEL_HI_FREQ, NUM_MFCC_FEATS, FRAME_LEN_SAMPLES, false, NUM_MFCC_VECTORS);
+
+    MFCC mfccInst = MFCC(mfccParams);
+
+    Preprocess preprocessor(MFCC_WINDOW_LEN, MFCC_WINDOW_STRIDE, mfccInst);
+
+    bool isFirstWindow = true;
+    std::string currentRContext  = "";
+
+    std::map <std::string, std::string> options;
+
+    int result = ParseOptions(options, CMD_OPTIONS, argv, argc);
+    if (result != 0)
+    {
+        return result;
+    }
+
+    // Create the pipeline options
+    common::PipelineOptions pipelineOptions;
+    pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
+
+    if (CheckOptionSpecified(options, PREFERRED_BACKENDS))
+    {
+        pipelineOptions.m_backends = GetPreferredBackendList((GetSpecifiedOption(options, PREFERRED_BACKENDS)));
+    }
+    else
+    {
+        pipelineOptions.m_backends = {"CpuAcc", "CpuRef"};
+    }
+
+    asr::IPipelinePtr asrPipeline = asr::CreatePipeline(pipelineOptions, labels);
+
+    asr::AudioCapture capture;
+    std::vector<float> audioData = capture.LoadAudioFile(GetSpecifiedOption(options, AUDIO_FILE_PATH));
+    capture.InitSlidingWindow(audioData.data(), audioData.size(), SAMPLES_PER_INFERENCE, SLIDING_WINDOW_OFFSET);
+
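+    // Process the audio in overlapping blocks: each block is converted to quantised MFCC
+    // features, run through the network, and decoded. The first-window flag and the
+    // end-of-data flag let the pipeline stitch results across the block boundaries.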
+    while (capture.HasNext())
+    {
+        std::vector<float> audioBlock = capture.Next();
+        InferenceResults results;
+
+        std::vector<int8_t> preprocessedData = asrPipeline->PreProcessing<float, int8_t>(audioBlock, preprocessor);
+        asrPipeline->Inference<int8_t>(preprocessedData, results);
+        asrPipeline->PostProcessing<int8_t>(results, isFirstWindow, !capture.HasNext(), currentRContext);
+    }
+
+    return 0;
+}
\ No newline at end of file
diff --git a/samples/SpeechRecognition/src/MathUtils.cpp b/samples/SpeechRecognition/src/MathUtils.cpp
new file mode 100644
index 0000000..bf99083
--- /dev/null
+++ b/samples/SpeechRecognition/src/MathUtils.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MathUtils.hpp"
+#include <vector>
+#include <cmath>
+#include <cstdio>
+#include <numeric>
+
+void MathUtils::FftF32(std::vector<float>& input,
+                       std::vector<float>& fftOutput)
+{
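+    /* Direct O(N^2) DFT of a real-valued input; only bins 0..N/2 are computed,
+     * since the remaining bins are conjugate-symmetric for real input. */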
+    const int inputLength = input.size();
+
+    for (int k = 0; k <= inputLength / 2; k++)
+    {
+        float sumReal = 0, sumImag = 0;
+
+        for (int t = 0; t < inputLength; t++)
+        {
+            float angle = 2 * M_PI * t * k / inputLength;
+            sumReal += input[t] * cosf(angle);
+            sumImag += -input[t] * sinf(angle);
+        }
+
+        /* Arrange output to [real0, realN/2, real1, im1, real2, im2, ...] */
+        if (k == 0)
+        {
+            fftOutput[0] = sumReal;
+        }
+        else if (k == inputLength / 2)
+        {
+            fftOutput[1] = sumReal;
+        }
+        else
+        {
+            fftOutput[k*2] = sumReal;
+            fftOutput[k*2 + 1] = sumImag;
+        }
+    }
+}
+
+float MathUtils::DotProductF32(float* srcPtrA, float* srcPtrB,
+                               const int srcLen)
+{
+    float output = 0.f;
+
+    for (int i = 0; i < srcLen; ++i)
+    {
+        output += *srcPtrA++ * *srcPtrB++;
+    }
+    return output;
+}
+
+bool MathUtils::ComplexMagnitudeSquaredF32(float* ptrSrc,
+                                           const int srcLen,
+                                           float* ptrDst,
+                                           const int dstLen)
+{
+    if (dstLen < srcLen/2)
+    {
+        printf("dstLen must be at least srcLen/2\n");
+        return false;
+    }
+
+    /* Each (real, imaginary) pair in the source produces one magnitude-squared value. */
+    for (int j = 0; j < srcLen; j += 2)
+    {
+        const float real = *ptrSrc++;
+        const float im = *ptrSrc++;
+        *ptrDst++ = real*real + im*im;
+    }
+    return true;
+}
+
+void MathUtils::VecLogarithmF32(std::vector <float>& input,
+                                std::vector <float>& output)
+{
+    for (auto in = input.begin(), out = output.begin();
+         in != input.end(); ++in, ++out)
+    {
+        *out = logf(*in);
+    }
+}
+
+float MathUtils::MeanF32(float* ptrSrc, const uint32_t srcLen)
+{
+    if (!srcLen)
+    {
+        return 0.f;
+    }
+
+    float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0);
+    return acc/srcLen;
+}
+
+float MathUtils::StdDevF32(float* ptrSrc, const uint32_t srcLen,
+                           const float mean)
+{
+    if (!srcLen)
+    {
+        return 0.f;
+    }
+    auto VarianceFunction = [=](float acc, const float value) {
+        return acc + (((value - mean) * (value - mean))/ srcLen);
+    };
+
+    float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0,
+                                VarianceFunction);
+    return sqrtf(acc);
+}
+
diff --git a/samples/SpeechRecognition/src/Preprocess.cpp b/samples/SpeechRecognition/src/Preprocess.cpp
new file mode 100644
index 0000000..8627961
--- /dev/null
+++ b/samples/SpeechRecognition/src/Preprocess.cpp
@@ -0,0 +1,192 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <algorithm>
+#include <numeric>
+#include <math.h>
+#include <string.h>
+
+#include "MathUtils.hpp"
+#include "Preprocess.hpp"
+
+Preprocess::Preprocess(
+        const uint32_t  windowLen,
+        const uint32_t  windowStride,
+        const MFCC mfccInst):
+        _m_mfcc(mfccInst),
+        _m_mfccBuf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
+        _m_delta1Buf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
+        _m_delta2Buf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
+        _m_windowLen(windowLen),
+        _m_windowStride(windowStride)
+{
+    if (mfccInst._m_params.m_numMfccFeatures > 0 && windowLen > 0)
+    {
+        this->_m_mfcc.Init();
+    }
+}
+
+Preprocess::~Preprocess()
+{
+}
+
+bool Preprocess::Invoke( const float*  audioData, const uint32_t  audioDataLen, std::vector<int8_t>& output,
+        int quantOffset, float quantScale)
+{
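+    /* Slide an MFCC window across the audio block, compute one MFCC vector per position,
+     * derive first and second order deltas, normalise each buffer to zero mean and unit
+     * variance, then quantise everything to the network's int8 input format. */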
+    this->_m_window = SlidingWindow<const float>(
+            audioData, audioDataLen,
+            this->_m_windowLen, this->_m_windowStride);
+
+    uint32_t mfccBufIdx = 0;
+
+    // Init buffers with 0
+    std::fill(_m_mfccBuf.begin(), _m_mfccBuf.end(), 0.f);
+    std::fill(_m_delta1Buf.begin(), _m_delta1Buf.end(), 0.f);
+    std::fill(_m_delta2Buf.begin(), _m_delta2Buf.end(), 0.f);
+
+    /* While we can slide over the window */
+    while (this->_m_window.HasNext())
+    {
+        const float*  mfccWindow = this->_m_window.Next();
+        auto mfccAudioData = std::vector<float>(
+                mfccWindow,
+                mfccWindow + this->_m_windowLen);
+
+        auto mfcc = this->_m_mfcc.MfccCompute(mfccAudioData);
+        for (size_t i = 0; i < this->_m_mfccBuf.size(0); ++i)
+        {
+            this->_m_mfccBuf(i, mfccBufIdx) = mfcc[i];
+        }
+        ++mfccBufIdx;
+    }
+
+    /* Pad MFCC if needed by repeating last feature vector */
+    while (mfccBufIdx != this->_m_mfcc._m_params.m_numMfccVectors)
+    {
+        memcpy(&this->_m_mfccBuf(0, mfccBufIdx),
+               &this->_m_mfccBuf(0, mfccBufIdx-1), sizeof(float)*this->_m_mfcc._m_params.m_numMfccFeatures);
+        ++mfccBufIdx;
+    }
+
+    /* Compute first and second order deltas from MFCCs */
+    this->_ComputeDeltas(this->_m_mfccBuf,
+                         this->_m_delta1Buf,
+                         this->_m_delta2Buf);
+
+    /* Normalise */
+    this->_Normalise();
+
+    return this->_Quantise<int8_t>(output.data(), quantOffset, quantScale);
+}
+
+bool Preprocess::_ComputeDeltas(Array2d<float>& mfcc,
+                                Array2d<float>& delta1,
+                                Array2d<float>& delta2)
+{
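+    /* 9-tap delta kernels: delta1Coeffs is the standard linear-regression delta filter
+     * (n / 60 for n = 4..-4) and delta2Coeffs approximates the second derivative. */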
+    const std::vector <float> delta1Coeffs =
+            {6.66666667e-02,  5.00000000e-02,  3.33333333e-02,
+             1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
+             -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
+
+    const std::vector <float> delta2Coeffs =
+            {0.06060606,      0.01515152,     -0.01731602,
+             -0.03679654,     -0.04329004,     -0.03679654,
+             -0.01731602,      0.01515152,      0.06060606};
+
+    if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
+        mfcc.size(0) == 0 || mfcc.size(1) == 0)
+    {
+        return false;
+    }
+
+    /* Get the middle index; coeff vec len should always be odd */
+    const size_t coeffLen = delta1Coeffs.size();
+    const size_t fMidIdx = (coeffLen - 1)/2;
+    const size_t numFeatures = mfcc.size(0);
+    const size_t numFeatVectors = mfcc.size(1);
+
+    /* iterate through features in MFCC vector*/
+    for (size_t i = 0; i < numFeatures; ++i)
+    {
+        /* for each feature, iterate through time (t) samples representing feature evolution and
+        * calculate d/dt and d^2/dt^2, using 1d convolution with differential kernels.
+        * Convolution padding = valid, result size is `time length - kernel length + 1`.
+        * The result is padded with 0 from both sides to match the size of initial time samples data.
+        *
+        * For the small filter, conv1d implementation as a simple loop is efficient enough.
+        * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
+        */
+
+        for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j)
+        {
+            float d1 = 0;
+            float d2 = 0;
+            const size_t mfccStIdx = j - fMidIdx;
+
+            for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m)
+            {
+
+                d1 +=  mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
+                d2 +=  mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
+            }
+
+            delta1(i,j) = d1;
+            delta2(i,j) = d2;
+        }
+    }
+
+    return true;
+}
+
+float Preprocess::_GetMean(Array2d<float>& vec)
+{
+    return MathUtils::MeanF32(vec.begin(), vec.totalSize());
+}
+
+float Preprocess::_GetStdDev(Array2d<float>& vec, const float mean)
+{
+    return MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
+}
+
+void Preprocess::_NormaliseVec(Array2d<float>& vec)
+{
+    auto mean = Preprocess::_GetMean(vec);
+    auto stddev = Preprocess::_GetStdDev(vec, mean);
+
+    if (stddev == 0)
+    {
+        std::fill(vec.begin(), vec.end(), 0);
+    }
+    else
+    {
+        const float stddevInv = 1.f/stddev;
+        const float normalisedMean = mean/stddev;
+
+        auto NormalisingFunction = [=](float &value) {
+            value = value * stddevInv - normalisedMean;
+        };
+        std::for_each(vec.begin(), vec.end(), NormalisingFunction);
+    }
+}
+
+void Preprocess::_Normalise()
+{
+    Preprocess::_NormaliseVec(this->_m_mfccBuf);
+    Preprocess::_NormaliseVec(this->_m_delta1Buf);
+    Preprocess::_NormaliseVec(this->_m_delta2Buf);
+}
+
+float Preprocess::_GetQuantElem(
+        const float     elem,
+        const float     quantScale,
+        const int       quantOffset,
+        const float     minVal,
+        const float     maxVal)
+{
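+    /* Affine quantisation: q = round(x / scale) + offset, clamped to the
+     * [minVal, maxVal] range of the target integer type. */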
+    float val = std::round((elem/quantScale) + quantOffset);
+    return std::min<float>(std::max<float>(val, minVal), maxVal);
+}
\ No newline at end of file
diff --git a/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp b/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
new file mode 100644
index 0000000..1b822d6
--- /dev/null
+++ b/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpeechRecognitionPipeline.hpp"
+#include "ArmnnNetworkExecutor.hpp"
+
+namespace asr
+{
+ASRPipeline::ASRPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
+                         std::unique_ptr<Decoder> decoder
+                         ) :
+        m_executor(std::move(executor)),
+        m_decoder(std::move(decoder)){}
+
+IPipelinePtr CreatePipeline(common::PipelineOptions& config, std::map<int, std::string>& labels)
+{
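+    /* The network executor is instantiated for int8_t because the sample expects a quantised
+     * Wav2Letter model; the decoder maps the network's output indices back to characters. */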
+    auto executor = std::make_unique<common::ArmnnNetworkExecutor<int8_t>>(config.m_ModelFilePath, config.m_backends);
+
+    auto decoder = std::make_unique<asr::Decoder>(labels);
+
+    return std::make_unique<asr::ASRPipeline>(std::move(executor), std::move(decoder));
+}
+
+}// namespace asr
\ No newline at end of file