MLECO-3183: Refactoring application sources

Platform-agnostic application sources are moved into the application
API module with their own independent CMake projects.

Changes for MLECO-3080 are also included - they create CMake projects
for the individual APIs (again, platform agnostic) that depend on the
common logic. The KWS_ASR "joint" API has been removed and the use
case now relies on the individual KWS and ASR API libraries.

Change-Id: I1f7748dc767abb3904634a04e0991b74ac7b756d
Signed-off-by: Kshitij Sisodia <kshitij.sisodia@arm.com>
diff --git a/source/application/api/common/source/Classifier.cc b/source/application/api/common/source/Classifier.cc
new file mode 100644
index 0000000..6fabebe
--- /dev/null
+++ b/source/application/api/common/source/Classifier.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Classifier.hpp"
+
+#include "TensorFlowLiteMicro.hpp"
+#include "PlatformMath.hpp"
+#include "log_macros.h"
+
+#include <vector>
+#include <string>
+#include <set>
+#include <cstdint>
+#include <cinttypes>
+
+
+namespace arm {
+namespace app {
+
+    void Classifier::SetVectorResults(std::set<std::pair<float, uint32_t>>& topNSet,
+                                      std::vector<ClassificationResult>& vecResults,
+                                      const std::vector<std::string>& labels)
+    {
+        /* Fill the results from the largest element down - use a reverse iterator. */
+        auto topNIter = topNSet.rbegin();
+        for (size_t i = 0; i < vecResults.size() && topNIter != topNSet.rend(); ++i, ++topNIter) {
+            vecResults[i].m_normalisedVal = topNIter->first;
+            vecResults[i].m_label = labels[topNIter->second];
+            vecResults[i].m_labelIdx = topNIter->second;
+        }
+    }
+
+    bool Classifier::GetTopNResults(const std::vector<float>& tensor,
+                                    std::vector<ClassificationResult>& vecResults,
+                                    uint32_t topNCount,
+                                    const std::vector<std::string>& labels)
+    {
+        std::set<std::pair<float, uint32_t>> sortedSet;
+
+        /* NOTE: tensor's size verification against labels should be
+         *       checked by the calling/public function. */
+
+        /* Set initial elements. */
+        for (uint32_t i = 0; i < topNCount; ++i) {
+            sortedSet.insert({tensor[i], i});
+        }
+
+        /* Initialise iterator. */
+        auto setFwdIter = sortedSet.begin();
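+        /* The set is ordered by value, so begin() always points at the
+         * smallest of the current top N candidates. */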
+
+        /* Scan through the rest of elements with compare operations. */
+        for (uint32_t i = topNCount; i < labels.size(); ++i) {
+            if (setFwdIter->first < tensor[i]) {
+                sortedSet.erase(*setFwdIter);
+                sortedSet.insert({tensor[i], i});
+                setFwdIter = sortedSet.begin();
+            }
+        }
+
+        /* Final results' container. */
+        vecResults = std::vector<ClassificationResult>(topNCount);
+        SetVectorResults(sortedSet, vecResults, labels);
+
+        return true;
+    }
+
+    bool Classifier::GetClassificationResults(
+        TfLiteTensor* outputTensor,
+        std::vector<ClassificationResult>& vecResults,
+        const std::vector<std::string>& labels,
+        uint32_t topNCount,
+        bool useSoftmax)
+    {
+        if (outputTensor == nullptr) {
+            printf_err("Output vector is null pointer.\n");
+            return false;
+        }
+
+        uint32_t totalOutputSize = 1;
+        for (int dim = 0; dim < outputTensor->dims->size; dim++) {
+            totalOutputSize *= outputTensor->dims->data[dim];
+        }
+
+        /* Sanity checks. */
+        if (totalOutputSize < topNCount) {
+            printf_err("Output vector is smaller than %" PRIu32 "\n", topNCount);
+            return false;
+        } else if (totalOutputSize != labels.size()) {
+            printf_err("Output size doesn't match the labels' size\n");
+            return false;
+        } else if (topNCount == 0) {
+            printf_err("Top N results cannot be zero\n");
+            return false;
+        }
+
+        bool resultState;
+        vecResults.clear();
+
+        /* De-quantise the output tensor. */
+        QuantParams quantParams = GetTensorQuantParams(outputTensor);
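+        /* Dequantisation applied below: real_value = scale * (quantised_value - offset). */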
+
+        /* Floating point tensor data to be populated.
+         * NOTE: The assumption here is that the output tensor size isn't too
+         * big and therefore, there's negligible impact on heap usage. */
+        std::vector<float> tensorData(totalOutputSize);
+
+        /* Populate the floating point buffer */
+        switch (outputTensor->type) {
+            case kTfLiteUInt8: {
+                uint8_t *tensor_buffer = tflite::GetTensorData<uint8_t>(outputTensor);
+                for (size_t i = 0; i < totalOutputSize; ++i) {
+                    tensorData[i] = quantParams.scale *
+                        (static_cast<float>(tensor_buffer[i]) - quantParams.offset);
+                }
+                break;
+            }
+            case kTfLiteInt8: {
+                int8_t *tensor_buffer = tflite::GetTensorData<int8_t>(outputTensor);
+                for (size_t i = 0; i < totalOutputSize; ++i) {
+                    tensorData[i] = quantParams.scale *
+                        (static_cast<float>(tensor_buffer[i]) - quantParams.offset);
+                }
+                break;
+            }
+            case kTfLiteFloat32: {
+                float *tensor_buffer = tflite::GetTensorData<float>(outputTensor);
+                for (size_t i = 0; i < totalOutputSize; ++i) {
+                    tensorData[i] = tensor_buffer[i];
+                }
+                break;
+            }
+            default:
+                printf_err("Tensor type %s not supported by classifier\n",
+                    TfLiteTypeGetName(outputTensor->type));
+                return false;
+        }
+
+        if (useSoftmax) {
+            math::MathUtils::SoftmaxF32(tensorData);
+        }
+
+        /* Get the top N results. */
+        resultState = GetTopNResults(tensorData, vecResults, topNCount, labels);
+
+        if (!resultState) {
+            printf_err("Failed to get top N results set\n");
+            return false;
+        }
+
+        return true;
+    }
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/application/api/common/source/ImageUtils.cc b/source/application/api/common/source/ImageUtils.cc
new file mode 100644
index 0000000..31b9493
--- /dev/null
+++ b/source/application/api/common/source/ImageUtils.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ImageUtils.hpp"
+
+#include <limits>
+
+namespace arm {
+namespace app {
+namespace image {
+
+    float Calculate1DOverlap(float x1Center, float width1, float x2Center, float width2)
+    {
+        float left_1 = x1Center - width1/2;
+        float left_2 = x2Center - width2/2;
+        float leftMax = left_1 > left_2 ? left_1 : left_2;
+
+        float right_1 = x1Center + width1/2;
+        float right_2 = x2Center + width2/2;
+        float rightMin = right_1 < right_2 ? right_1 : right_2;
+
+        return rightMin - leftMax;
+    }
+
+    float CalculateBoxIntersect(Box& box1, Box& box2)
+    {
+        float width = Calculate1DOverlap(box1.x, box1.w, box2.x, box2.w);
+        if (width < 0) {
+            return 0;
+        }
+        float height = Calculate1DOverlap(box1.y, box1.h, box2.y, box2.h);
+        if (height < 0) {
+            return 0;
+        }
+
+        float total_area = width*height;
+        return total_area;
+    }
+
+    float CalculateBoxUnion(Box& box1, Box& box2)
+    {
+        float boxes_intersection = CalculateBoxIntersect(box1, box2);
+        float boxes_union = box1.w * box1.h + box2.w * box2.h - boxes_intersection;
+        return boxes_union;
+    }
+
+    float CalculateBoxIOU(Box& box1, Box& box2)
+    {
+        float boxes_intersection = CalculateBoxIntersect(box1, box2);
+        if (boxes_intersection == 0) {
+            return 0;
+        }
+
+        float boxes_union = CalculateBoxUnion(box1, box2);
+        if (boxes_union == 0) {
+            return 0;
+        }
+
+        return boxes_intersection / boxes_union;
+    }
+
+    void CalculateNMS(std::forward_list<Detection>& detections, int classes, float iouThreshold)
+    {
+        int idxClass{0};
+        /* Capture idxClass by reference: the comparator must track the
+         * current class index as the outer loop below updates it. */
+        auto CompareProbs = [&idxClass](Detection& prob1, Detection& prob2) {
+            return prob1.prob[idxClass] > prob2.prob[idxClass];
+        };
+
+        for (idxClass = 0; idxClass < classes; ++idxClass) {
+            detections.sort(CompareProbs);
+
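+            /* Detections are now sorted by descending probability for this
+             * class; zero out any lower-ranked detection that overlaps a
+             * kept detection beyond the IoU threshold. */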
+            for (auto it = detections.begin(); it != detections.end(); ++it) {
+                if (it->prob[idxClass] == 0) {
+                    continue;
+                }
+                for (auto itc = std::next(it, 1); itc != detections.end(); ++itc) {
+                    if (itc->prob[idxClass] == 0) {
+                        continue;
+                    }
+                    if (CalculateBoxIOU(it->bbox, itc->bbox) > iouThreshold) {
+                        itc->prob[idxClass] = 0;
+                    }
+                }
+            }
+        }
+    }
+
+    void ConvertImgToInt8(void* data, const size_t kMaxImageSize)
+    {
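+        /* Shift each unsigned byte from [0, 255] down by 128 into the
+         * signed int8 range [-128, 127]. */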
+        auto* tmp_req_data = static_cast<uint8_t*>(data);
+        auto* tmp_signed_req_data = static_cast<int8_t*>(data);
+
+        for (size_t i = 0; i < kMaxImageSize; i++) {
+            tmp_signed_req_data[i] = (int8_t) (
+                    (int32_t) (tmp_req_data[i]) - 128);
+        }
+    }
+
+    void RgbToGrayscale(const uint8_t* srcPtr, uint8_t* dstPtr, const size_t dstImgSz)
+    {
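+        /* ITU-R BT.601 luma coefficients for the RGB to grayscale conversion. */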
+        const float R = 0.299;
+        const float G = 0.587;
+        const float B = 0.114;
+        for (size_t i = 0; i < dstImgSz; ++i, srcPtr += 3) {
+            uint32_t int_gray = R * (*srcPtr) +
+                                 G * (*(srcPtr + 1)) +
+                                 B * (*(srcPtr + 2));
+            *dstPtr++ = int_gray <= std::numeric_limits<uint8_t>::max() ?
+                        int_gray : std::numeric_limits<uint8_t>::max();
+        }
+    }
+
+} /* namespace image */
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/application/api/common/source/Mfcc.cc b/source/application/api/common/source/Mfcc.cc
new file mode 100644
index 0000000..3bf5eb3
--- /dev/null
+++ b/source/application/api/common/source/Mfcc.cc
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Mfcc.hpp"
+#include "PlatformMath.hpp"
+#include "log_macros.h"
+
+#include <cfloat>
+#include <cinttypes>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    MfccParams::MfccParams(
+                    const float samplingFreq,
+                    const uint32_t numFbankBins,
+                    const float melLoFreq,
+                    const float melHiFreq,
+                    const uint32_t numMfccFeats,
+                    const uint32_t frameLen,
+                    const bool useHtkMethod):
+                        m_samplingFreq(samplingFreq),
+                        m_numFbankBins(numFbankBins),
+                        m_melLoFreq(melLoFreq),
+                        m_melHiFreq(melHiFreq),
+                        m_numMfccFeatures(numMfccFeats),
+                        m_frameLen(frameLen),
+
+                        /* Smallest power of 2 >= frame length. */
+                        m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
+                        m_useHtkMethod(useHtkMethod)
+    {}
+
+    void MfccParams::Log() const
+    {
+        debug("MFCC parameters:\n");
+        debug("\t Sampling frequency:         %f\n", this->m_samplingFreq);
+        debug("\t Number of filter banks:     %" PRIu32 "\n", this->m_numFbankBins);
+        debug("\t Mel frequency limit (low):  %f\n", this->m_melLoFreq);
+        debug("\t Mel frequency limit (high): %f\n", this->m_melHiFreq);
+        debug("\t Number of MFCC features:    %" PRIu32 "\n", this->m_numMfccFeatures);
+        debug("\t Frame length:               %" PRIu32 "\n", this->m_frameLen);
+        debug("\t Padded frame length:        %" PRIu32 "\n", this->m_frameLenPadded);
+        debug("\t Using HTK for Mel scale:    %s\n", this->m_useHtkMethod ? "yes" : "no");
+    }
+
+    MFCC::MFCC(const MfccParams& params):
+        m_params(params),
+        m_filterBankInitialised(false)
+    {
+        this->m_buffer = std::vector<float>(
+                            this->m_params.m_frameLenPadded, 0.0);
+        this->m_frame = std::vector<float>(
+                            this->m_params.m_frameLenPadded, 0.0);
+        this->m_melEnergies = std::vector<float>(
+                                this->m_params.m_numFbankBins, 0.0);
+
+        this->m_windowFunc = std::vector<float>(this->m_params.m_frameLen);
+        const auto multiplier = static_cast<float>(2 * M_PI / this->m_params.m_frameLen);
+
+        /* Create a Hann window function. */
+        for (size_t i = 0; i < this->m_params.m_frameLen; i++) {
+            this->m_windowFunc[i] = (0.5 - (0.5 *
+                math::MathUtils::CosineF32(static_cast<float>(i) * multiplier)));
+        }
+
+        math::MathUtils::FftInitF32(this->m_params.m_frameLenPadded, this->m_fftInstance);
+        this->m_params.Log();
+    }
+
+    void MFCC::Init()
+    {
+        this->InitMelFilterBank();
+    }
+
+    float MFCC::MelScale(const float freq, const bool useHTKMethod)
+    {
+        if (useHTKMethod) {
+            return 1127.0f * logf (1.0f + freq / 700.0f);
+        } else {
+            /* Slaney formula for mel scale. */
+
+            float mel = freq / ms_freqStep;
+
+            if (freq >= ms_minLogHz) {
+                mel = ms_minLogMel + logf(freq / ms_minLogHz) / ms_logStep;
+            }
+            return mel;
+        }
+    }
+
+    float MFCC::InverseMelScale(const float melFreq, const bool useHTKMethod)
+    {
+        if (useHTKMethod) {
+            return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
+        } else {
+            /* Slaney formula for mel scale. */
+            float freq = ms_freqStep * melFreq;
+
+            if (melFreq >= ms_minLogMel) {
+                freq = ms_minLogHz * expf(ms_logStep * (melFreq - ms_minLogMel));
+            }
+            return freq;
+        }
+    }
+
+
+    bool MFCC::ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<uint32_t>&               filterBankFilterFirst,
+            std::vector<uint32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies)
+    {
+        const size_t numBanks = melEnergies.size();
+
+        if (numBanks != filterBankFilterFirst.size() ||
+                numBanks != filterBankFilterLast.size()) {
+            printf_err("unexpected filter bank lengths\n");
+            return false;
+        }
+
+        for (size_t bin = 0; bin < numBanks; ++bin) {
+            auto filterBankIter = melFilterBank[bin].begin();
+            auto end = melFilterBank[bin].end();
+            float melEnergy = FLT_MIN;  /* Avoid log of zero at later stages */
+            const uint32_t firstIndex = filterBankFilterFirst[bin];
+            const uint32_t lastIndex = std::min<uint32_t>(filterBankFilterLast[bin], fftVec.size() - 1);
+
+            for (uint32_t i = firstIndex; i <= lastIndex && filterBankIter != end; i++) {
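+                /* fftVec holds squared magnitudes; take the square root to
+                 * weight each filter tap by spectral magnitude. */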
+                float energyRep = math::MathUtils::SqrtF32(fftVec[i]);
+                melEnergy += (*filterBankIter++ * energyRep);
+            }
+
+            melEnergies[bin] = melEnergy;
+        }
+
+        return true;
+    }
+
+    void MFCC::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+    {
+        for (float& melEnergy : melEnergies) {
+            melEnergy = logf(melEnergy);
+        }
+    }
+
+    void MFCC::ConvertToPowerSpectrum()
+    {
+        const uint32_t halfDim = this->m_buffer.size() / 2;
+
+        /* The real FFT output packs the DC component at index 0 and the
+         * Nyquist component at index 1; compute their energies separately. */
+        float firstEnergy = this->m_buffer[0] * this->m_buffer[0];
+        float lastEnergy = this->m_buffer[1] * this->m_buffer[1];
+
+        math::MathUtils::ComplexMagnitudeSquaredF32(
+                            this->m_buffer.data(),
+                            this->m_buffer.size(),
+                            this->m_buffer.data(),
+                            this->m_buffer.size()/2);
+
+        this->m_buffer[0] = firstEnergy;
+        this->m_buffer[halfDim] = lastEnergy;
+    }
+
+    std::vector<float> MFCC::CreateDCTMatrix(
+                                const int32_t inputLength,
+                                const int32_t coefficientCount)
+    {
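+        /* DCT-II basis with a common normaliser, as computed below:
+         * dct[k][n] = sqrt(2/N) * cos((n + 0.5) * k * pi / N), N = inputLength. */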
+        std::vector<float> dctMatix(inputLength * coefficientCount);
+
+        const float normalizer = math::MathUtils::SqrtF32(2.0f/inputLength);
+        const float angleIncr = M_PI/inputLength;
+        float angle = 0;
+
+        for (int32_t k = 0, m = 0; k < coefficientCount; k++, m += inputLength) {
+            for (int32_t n = 0; n < inputLength; n++) {
+                dctMatrix[m+n] = normalizer *
+                    math::MathUtils::CosineF32((n + 0.5f) * angle);
+            }
+            angle += angleIncr;
+        }
+
+        return dctMatrix;
+    }
+
+    float MFCC::GetMelFilterBankNormaliser(
+                    const float&    leftMel,
+                    const float&    rightMel,
+                    const bool      useHTKMethod)
+    {
+        UNUSED(leftMel);
+        UNUSED(rightMel);
+        UNUSED(useHTKMethod);
+
+        /* By default, no normalisation => return 1 */
+        return 1.f;
+    }
+
+    void MFCC::InitMelFilterBank()
+    {
+        if (!this->IsMelFilterBankInited()) {
+            this->m_melFilterBank = this->CreateMelFilterBank();
+            this->m_dctMatrix = this->CreateDCTMatrix(
+                                    this->m_params.m_numFbankBins,
+                                    this->m_params.m_numMfccFeatures);
+            this->m_filterBankInitialised = true;
+        }
+    }
+
+    bool MFCC::IsMelFilterBankInited() const
+    {
+        return this->m_filterBankInitialised;
+    }
+
+    void MFCC::MfccComputePreFeature(const std::vector<int16_t>& audioData)
+    {
+        this->InitMelFilterBank();
+
+        /* TensorFlow way of normalizing .wav data to (-1, 1). */
+        constexpr float normaliser = 1.0/(1u<<15u);
+        for (size_t i = 0; i < this->m_params.m_frameLen; i++) {
+            this->m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
+        }
+
+        /* Apply window function to input frame. */
+        for (size_t i = 0; i < this->m_params.m_frameLen; i++) {
+            this->m_frame[i] *= this->m_windowFunc[i];
+        }
+
+        /* Set remaining frame values to 0. */
+        std::fill(this->m_frame.begin() + this->m_params.m_frameLen,this->m_frame.end(), 0);
+
+        /* Compute FFT. */
+        math::MathUtils::FftF32(this->m_frame, this->m_buffer, this->m_fftInstance);
+
+        /* Convert to power spectrum. */
+        this->ConvertToPowerSpectrum();
+
+        /* Apply mel filterbanks. */
+        if (!this->ApplyMelFilterBank(this->m_buffer,
+                                      this->m_melFilterBank,
+                                      this->m_filterBankFilterFirst,
+                                      this->m_filterBankFilterLast,
+                                      this->m_melEnergies)) {
+            printf_err("Failed to apply MEL filter banks\n");
+        }
+
+        /* Convert to logarithmic scale. */
+        this->ConvertToLogarithmicScale(this->m_melEnergies);
+    }
+
+    std::vector<float> MFCC::MfccCompute(const std::vector<int16_t>& audioData)
+    {
+        this->MfccComputePreFeature(audioData);
+
+        std::vector<float> mfccOut(this->m_params.m_numMfccFeatures);
+
+        float* ptrMel = this->m_melEnergies.data();
+        float* ptrDct = this->m_dctMatrix.data();
+        float* ptrMfcc = mfccOut.data();
+
+        /* Take the DCT: a matrix multiplication of the DCT matrix with the mel energies. */
+        for (size_t i = 0, j = 0; i < mfccOut.size();
+                    ++i, j += this->m_params.m_numFbankBins) {
+            *ptrMfcc++ = math::MathUtils::DotProductF32(
+                                            ptrDct + j,
+                                            ptrMel,
+                                            this->m_params.m_numFbankBins);
+        }
+        return mfccOut;
+    }
+
+    std::vector<std::vector<float>> MFCC::CreateMelFilterBank()
+    {
+        size_t numFftBins = this->m_params.m_frameLenPadded / 2;
+        float fftBinWidth = static_cast<float>(this->m_params.m_samplingFreq) / this->m_params.m_frameLenPadded;
+
+        float melLowFreq = MFCC::MelScale(this->m_params.m_melLoFreq,
+                                          this->m_params.m_useHtkMethod);
+        float melHighFreq = MFCC::MelScale(this->m_params.m_melHiFreq,
+                                           this->m_params.m_useHtkMethod);
+        float melFreqDelta = (melHighFreq - melLowFreq) / (this->m_params.m_numFbankBins + 1);
+
+        std::vector<float> thisBin = std::vector<float>(numFftBins);
+        std::vector<std::vector<float>> melFilterBank(
+                                            this->m_params.m_numFbankBins);
+        this->m_filterBankFilterFirst =
+                        std::vector<uint32_t>(this->m_params.m_numFbankBins);
+        this->m_filterBankFilterLast =
+                        std::vector<uint32_t>(this->m_params.m_numFbankBins);
+
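+        /* Each mel filter is triangular in mel space: it rises from leftMel
+         * to centerMel and falls back to zero at rightMel; only the non-zero
+         * taps (firstIndex..lastIndex) are stored per bin. */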
+        for (size_t bin = 0; bin < this->m_params.m_numFbankBins; bin++) {
+            float leftMel = melLowFreq + bin * melFreqDelta;
+            float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
+            float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
+
+            uint32_t firstIndex = 0;
+            uint32_t lastIndex = 0;
+            bool firstIndexFound = false;
+            const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->m_params.m_useHtkMethod);
+
+            for (size_t i = 0; i < numFftBins; i++) {
+                float freq = (fftBinWidth * i);  /* Center freq of this fft bin. */
+                float mel = MFCC::MelScale(freq, this->m_params.m_useHtkMethod);
+                thisBin[i] = 0.0;
+
+                if (mel > leftMel && mel < rightMel) {
+                    float weight;
+                    if (mel <= centerMel) {
+                        weight = (mel - leftMel) / (centerMel - leftMel);
+                    } else {
+                        weight = (rightMel - mel) / (rightMel - centerMel);
+                    }
+
+                    thisBin[i] = weight * normaliser;
+                    if (!firstIndexFound) {
+                        firstIndex = i;
+                        firstIndexFound = true;
+                    }
+                    lastIndex = i;
+                }
+            }
+
+            this->m_filterBankFilterFirst[bin] = firstIndex;
+            this->m_filterBankFilterLast[bin] = lastIndex;
+
+            /* Copy the part we care about. */
+            for (uint32_t i = firstIndex; i <= lastIndex; i++) {
+                melFilterBank[bin].push_back(thisBin[i]);
+            }
+        }
+
+        return melFilterBank;
+    }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/application/api/common/source/Model.cc b/source/application/api/common/source/Model.cc
new file mode 100644
index 0000000..f1ac91d
--- /dev/null
+++ b/source/application/api/common/source/Model.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Model.hpp"
+#include "log_macros.h"
+
+#include <cinttypes>
+
+arm::app::Model::~Model()
+{
+    delete this->m_pInterpreter;
+    /**
+     * No clean-up function available for allocator in TensorFlow Lite Micro yet.
+     **/
+}
+
+arm::app::Model::Model() :
+    m_inited(false),
+    m_type(kTfLiteNoType)
+{
+    this->m_pErrorReporter = tflite::GetMicroErrorReporter();
+}
+
+bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
+                           uint32_t tensorArenaSize,
+                           uint8_t* nnModelAddr,
+                           uint32_t nnModelSize,
+                           tflite::MicroAllocator* allocator)
+{
+    /* Following tf lite micro example:
+     * Map the model into a usable data structure. This doesn't involve any
+     * copying or parsing, it's a very lightweight operation. */
+    debug("loading model from @ 0x%p\n", nnModelAddr);
+    debug("model size: %" PRIu32 " bytes.\n", nnModelSize);
+
+    this->m_pModel = ::tflite::GetModel(nnModelAddr);
+
+    if (this->m_pModel->version() != TFLITE_SCHEMA_VERSION) {
+        this->m_pErrorReporter->Report(
+            "[ERROR] model's schema version %d is not equal "
+            "to supported version %d.",
+            this->m_pModel->version(), TFLITE_SCHEMA_VERSION);
+        return false;
+    }
+
+    this->m_modelAddr = nnModelAddr;
+    this->m_modelSize = nnModelSize;
+
+    /* Pull in only the operation implementations we need.
+     * This relies on a complete list of all the ops needed by this graph.
+     * An easier approach is to just use the AllOpsResolver, but this will
+     * incur some penalty in code space for op implementations that are not
+     * needed by this graph.
+     * static ::tflite::ops::micro::AllOpsResolver resolver; */
+    /* NOLINTNEXTLINE(runtime-global-variables) */
+    debug("loading op resolver\n");
+
+    this->EnlistOperations();
+
+    /* Create allocator instance, if it doesn't exist */
+    this->m_pAllocator = allocator;
+    if (!this->m_pAllocator) {
+        /* Create an allocator instance */
+        info("Creating allocator using tensor arena at 0x%p\n", tensorArenaAddr);
+
+        this->m_pAllocator = tflite::MicroAllocator::Create(
+                                        tensorArenaAddr,
+                                        tensorArenaSize,
+                                        this->m_pErrorReporter);
+
+        if (!this->m_pAllocator) {
+            printf_err("Failed to create allocator\n");
+            return false;
+        }
+        debug("Created new allocator @ 0x%p\n", this->m_pAllocator);
+    } else {
+        debug("Using existing allocator @ 0x%p\n", this->m_pAllocator);
+    }
+
+    this->m_pInterpreter = new ::tflite::MicroInterpreter(
+        this->m_pModel, this->GetOpResolver(),
+        this->m_pAllocator, this->m_pErrorReporter);
+
+    if (!this->m_pInterpreter) {
+        printf_err("Failed to allocate interpreter\n");
+        return false;
+    }
+
+    /* Allocate memory from the tensor_arena for the model's tensors. */
+    info("Allocating tensors\n");
+    TfLiteStatus allocate_status = this->m_pInterpreter->AllocateTensors();
+
+    if (allocate_status != kTfLiteOk) {
+        printf_err("tensor allocation failed!\n");
+        delete this->m_pInterpreter;
+        this->m_pInterpreter = nullptr; /* Avoid a double delete from the destructor. */
+        return false;
+    }
+
+    /* Get information about the memory area to use for the model's input. */
+    this->m_input.resize(this->GetNumInputs());
+    for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++)
+        this->m_input[inIndex] = this->m_pInterpreter->input(inIndex);
+
+    this->m_output.resize(this->GetNumOutputs());
+    for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++)
+        this->m_output[outIndex] = this->m_pInterpreter->output(outIndex);
+
+    if (this->m_input.empty() || this->m_output.empty()) {
+        printf_err("failed to get tensors\n");
+        return false;
+    } else {
+        this->m_type = this->m_input[0]->type;  /* Input 0 should be the main input */
+
+        /* Clear the input & output tensors */
+        for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) {
+            std::memset(this->m_input[inIndex]->data.data, 0, this->m_input[inIndex]->bytes);
+        }
+        for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) {
+            std::memset(this->m_output[outIndex]->data.data, 0, this->m_output[outIndex]->bytes);
+        }
+
+        this->LogInterpreterInfo();
+    }
+
+    this->m_inited = true;
+    return true;
+}
+
+tflite::MicroAllocator* arm::app::Model::GetAllocator()
+{
+    if (this->IsInited()) {
+        return this->m_pAllocator;
+    }
+    return nullptr;
+}
+
+void arm::app::Model::LogTensorInfo(TfLiteTensor* tensor)
+{
+    if (!tensor) {
+        printf_err("Invalid tensor\n");
+        assert(tensor);
+        return;
+    }
+
+    debug("\ttensor is assigned to 0x%p\n", tensor);
+    info("\ttensor type is %s\n", TfLiteTypeGetName(tensor->type));
+    info("\ttensor occupies %zu bytes with dimensions\n",
+         tensor->bytes);
+    for (int i = 0; i < tensor->dims->size; ++i) {
+        info("\t\t%d: %3d\n", i, tensor->dims->data[i]);
+    }
+
+    TfLiteQuantization quant = tensor->quantization;
+    if (kTfLiteAffineQuantization == quant.type) {
+        auto* quantParams = (TfLiteAffineQuantization*)quant.params;
+        info("Quant dimension: %" PRIi32 "\n", quantParams->quantized_dimension);
+        for (int i = 0; i < quantParams->scale->size; ++i) {
+            info("Scale[%d] = %f\n", i, quantParams->scale->data[i]);
+        }
+        for (int i = 0; i < quantParams->zero_point->size; ++i) {
+            info("ZeroPoint[%d] = %d\n", i, quantParams->zero_point->data[i]);
+        }
+    }
+}
+
+void arm::app::Model::LogInterpreterInfo()
+{
+    if (!this->m_pInterpreter) {
+        printf_err("Invalid interpreter\n");
+        return;
+    }
+
+    info("Model INPUT tensors: \n");
+    for (auto input : this->m_input) {
+        this->LogTensorInfo(input);
+    }
+
+    info("Model OUTPUT tensors: \n");
+    for (auto output : this->m_output) {
+        this->LogTensorInfo(output);
+    }
+
+    info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
+        this->m_pInterpreter->arena_used_bytes());
+
+    /* We expect there to be only one subgraph. */
+    const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
+    info("Number of operators: %" PRIu32 "\n", nOperators);
+
+    const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
+
+    auto* opcodes = this->m_pModel->operator_codes();
+
+    /* For each operator, display registration information. */
+    for (size_t i = 0 ; i < nOperators; ++i) {
+        const tflite::Operator* op = subgraph->operators()->Get(i);
+        const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
+        const TfLiteRegistration* reg = nullptr;
+
+        tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(),
+                                          this->m_pErrorReporter, &reg);
+        std::string opName;
+
+        if (reg) {
+            if (tflite::BuiltinOperator_CUSTOM == reg->builtin_code) {
+                opName = std::string(reg->custom_name);
+            } else {
+                opName = std::string(EnumNameBuiltinOperator(
+                            tflite::BuiltinOperator(reg->builtin_code)));
+            }
+        }
+        info("\tOperator %zu: %s\n", i, opName.c_str());
+    }
+}
+
+bool arm::app::Model::IsInited() const
+{
+    return this->m_inited;
+}
+
+bool arm::app::Model::IsDataSigned() const
+{
+    return this->GetType() == kTfLiteInt8;
+}
+
+bool arm::app::Model::ContainsEthosUOperator() const
+{
+    /* We expect there to be only one subgraph. */
+    const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
+    const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
+    const auto* opcodes = this->m_pModel->operator_codes();
+
+    /* Check for custom operators. */
+    for (size_t i = 0; i < nOperators; ++i) {
+        const tflite::Operator* op = subgraph->operators()->Get(i);
+        const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
+
+        auto builtin_code = tflite::GetBuiltinCode(opcode);
+        if ((builtin_code == tflite::BuiltinOperator_CUSTOM) &&
+            (nullptr != opcode->custom_code()) &&
+            ("ethos-u" == std::string(opcode->custom_code()->c_str()))) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool arm::app::Model::RunInference()
+{
+    bool inference_state = false;
+    if (this->m_pModel && this->m_pInterpreter) {
+        if (kTfLiteOk != this->m_pInterpreter->Invoke()) {
+            printf_err("Invoke failed.\n");
+        } else {
+            inference_state = true;
+        }
+    } else {
+        printf_err("Error: No interpreter!\n");
+    }
+    return inference_state;
+}
+
+TfLiteTensor* arm::app::Model::GetInputTensor(size_t index) const
+{
+    if (index < this->GetNumInputs()) {
+        return this->m_input.at(index);
+    }
+    return nullptr;
+}
+
+TfLiteTensor* arm::app::Model::GetOutputTensor(size_t index) const
+{
+    if (index < this->GetNumOutputs()) {
+        return this->m_output.at(index);
+    }
+    return nullptr;
+}
+
+size_t arm::app::Model::GetNumInputs() const
+{
+    if (this->m_pModel && this->m_pInterpreter) {
+        return this->m_pInterpreter->inputs_size();
+    }
+    return 0;
+}
+
+size_t arm::app::Model::GetNumOutputs() const
+{
+    if (this->m_pModel && this->m_pInterpreter) {
+        return this->m_pInterpreter->outputs_size();
+    }
+    return 0;
+}
+
+
+TfLiteType arm::app::Model::GetType() const
+{
+    return this->m_type;
+}
+
+TfLiteIntArray* arm::app::Model::GetInputShape(size_t index) const
+{
+    if (index < this->GetNumInputs()) {
+        return this->m_input.at(index)->dims;
+    }
+    return nullptr;
+}
+
+TfLiteIntArray* arm::app::Model::GetOutputShape(size_t index) const
+{
+    if (index < this->GetNumOutputs()) {
+        return this->m_output.at(index)->dims;
+    }
+    return nullptr;
+}
+
+bool arm::app::Model::ShowModelInfoHandler()
+{
+    if (!this->IsInited()) {
+        printf_err("Model is not initialised! Terminating processing.\n");
+        return false;
+    }
+
+    PrintTensorFlowVersion();
+    info("Model address: 0x%p", this->ModelPointer());
+    info("Model size:      %" PRIu32 " bytes.", this->ModelSize());
+    info("Model info:\n");
+    this->LogInterpreterInfo();
+
+    info("The model is optimised for Ethos-U NPU: %s.\n", this->ContainsEthosUOperator()? "yes": "no");
+
+    return true;
+}
+
+const uint8_t* arm::app::Model::ModelPointer()
+{
+    return this->m_modelAddr;
+}
+
+uint32_t arm::app::Model::ModelSize()
+{
+    return this->m_modelSize;
+}
diff --git a/source/application/api/common/source/TensorFlowLiteMicro.cc b/source/application/api/common/source/TensorFlowLiteMicro.cc
new file mode 100644
index 0000000..8738e5c
--- /dev/null
+++ b/source/application/api/common/source/TensorFlowLiteMicro.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "TensorFlowLiteMicro.hpp"
+
+void PrintTensorFlowVersion()
+{}
+
+arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor)
+{
+    arm::app::QuantParams params;
+    if (kTfLiteAffineQuantization == tensor->quantization.type) {
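+        /* Prefer the per-tensor affine quantisation parameters; fall back to
+         * the legacy scale/zero-point fields when they are not usable. */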
+        auto* quantParams = (TfLiteAffineQuantization*) (tensor->quantization.params);
+        if (quantParams && 0 == quantParams->quantized_dimension) {
+            if (quantParams->scale->size) {
+                params.scale = quantParams->scale->data[0];
+            }
+            if (quantParams->zero_point->size) {
+                params.offset = quantParams->zero_point->data[0];
+            }
+        } else if (tensor->params.scale != 0.0) {
+            /* Legacy tensorflow quantisation parameters */
+            params.scale = tensor->params.scale;
+            params.offset = tensor->params.zero_point;
+        }
+    }
+    return params;
+}
+
+extern "C" void DebugLog(const char* s)
+{
+    puts(s);
+}