MLECO-3183: Refactoring application sources

Platform agnostic application sources are moved into application
api module with their own independent CMake projects.

Changes for MLECO-3080 also included - they create CMake projects
for individual APIs (again, platform agnostic) that depend on the
common logic. The "joint" KWS_ASR API has been removed and
now the use case relies on individual KWS and ASR API libraries.

Change-Id: I1f7748dc767abb3904634a04e0991b74ac7b756d
Signed-off-by: Kshitij Sisodia <kshitij.sisodia@arm.com>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5a80554..e501a54 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -115,6 +115,9 @@
 # Include the tensorflow build target
 include(${CMAKE_SCRIPTS_DIR}/tensorflow.cmake)
 
+# Add the common API library target (tensorflow-lite-micro target is needed)
+add_subdirectory(${SRC_PATH}/application/api/common ${CMAKE_BINARY_DIR}/api/common)
+
 # Include directories for application module:
 set(APPLICATION_INCLUDE_DIRS
     ${SRC_PATH}/application/tensorflow-lite-micro/include
@@ -125,11 +128,6 @@
     "${SRC_PATH}/application/main/*.cc"
     "${SRC_PATH}/application/main/*.cpp"
     "${SRC_PATH}/application/main/*.c"
-    "${SRC_PATH}/application/main/**/*.cc"
-    "${SRC_PATH}/application/main/**/*.cpp"
-    "${SRC_PATH}/application/main/**/*.c"
-    "${SRC_PATH}/application/tensorflow-lite-micro/**/*.cc"
-    "${SRC_PATH}/application/tensorflow-lite-micro/*.cc"
     )
 list(FILTER SRC_APPLICATION EXCLUDE REGEX ".*main\\.c.*$")
 set(SRC_MAIN "${SRC_PATH}/application/main/Main.cc")
@@ -183,6 +181,7 @@
     file(GLOB UC_CMAKE_FILE
         "${SRC_USE_CASE}/${use_case}/*.cmake")
 
+    # Include the use case cmake file.
     include(${UC_CMAKE_FILE})
 
     file(GLOB_RECURSE UC_SRC
@@ -207,7 +206,7 @@
             "${${use_case}_COMPILE_DEFS}")
     endif()
 
-    set(UC_LIB_NAME lib${TARGET_NAME})
+    set(UC_LIB_NAME ${use_case})
 
     # Consolidated application static lib:
     add_library(${UC_LIB_NAME} STATIC
@@ -218,12 +217,11 @@
     target_include_directories(${UC_LIB_NAME} PUBLIC
         ${APPLICATION_INCLUDE_DIRS}
         ${UC_INCLUDE}
-        ${INC_GEN_DIR}
-        ${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro/tools/make/downloads/flatbuffers/include)
+        ${INC_GEN_DIR})
 
     # Set the activation buffer size
     target_compile_definitions(${UC_LIB_NAME} PUBLIC
-            "ACTIVATION_BUF_SZ=${${use_case}_ACTIVATION_BUF_SZ}")
+        "ACTIVATION_BUF_SZ=${${use_case}_ACTIVATION_BUF_SZ}")
 
     target_link_libraries(${UC_LIB_NAME} PUBLIC
         log
@@ -232,6 +230,26 @@
         profiler
         tensorflow-lite-micro)
 
+    # If an API exists for this use case, include the projects here and add to
+    # the library list.
+    foreach(API_TO_USE ${${use_case}_API_LIST})
+
+        # If the required target doesn't yet exist, include the project here:
+        if (NOT TARGET ${API_TO_USE}_api)
+            add_subdirectory(
+                ${SRC_PATH}/application/api/use_case/${API_TO_USE}  # Source path
+                ${CMAKE_BINARY_DIR}/api/use_case/${API_TO_USE})  # Binary path
+        endif()
+
+        # Check if the target now exists
+        if (TARGET ${API_TO_USE}_api)
+            message(STATUS "Using ${API_TO_USE}_api for ${use_case}")
+            target_link_libraries(${UC_LIB_NAME} PUBLIC ${API_TO_USE}_api)
+        else()
+            message(FATAL_ERROR "${API_TO_USE}_api target not found!")
+        endif()
+    endforeach()
+
     add_executable(${TARGET_NAME} ${SRC_MAIN})
 
     target_link_libraries(${TARGET_NAME} PUBLIC ${UC_LIB_NAME})
diff --git a/docs/sections/customizing.md b/docs/sections/customizing.md
index 5b822db..f08706b 100644
--- a/docs/sections/customizing.md
+++ b/docs/sections/customizing.md
@@ -323,10 +323,6 @@
     /** @brief   Adds operations to the op resolver instance. */
     bool EnlistOperations() override;
 
-    const uint8_t* ModelPointer() override;
-
-    size_t ModelSize() override;
-
   private:
     /* Maximum number of individual operations that can be enlisted. */
     static constexpr int ms_maxOpCnt = 5;
@@ -428,13 +424,25 @@
 #include "HelloWorldModel.hpp"
 #include "log_macros.h"
 
+  namespace arm {
+    namespace app {
+        static uint8_t tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+  } /* namespace arm */
+  
+  extern uint8_t* GetModelPointer();
+  extern size_t GetModelLen();
+
   void main_loop() {
 
   /* model wrapper object */
   arm::app::HelloWorldModel model;
 
   /* Load the model */
-  if (!model.Init()) {
+  if (!model.Init(arm::app::tensor_arena,
+                    sizeof(arm::app::tensor_arena),
+                    GetModelPointer(),
+                    GetModelLen())) {
     printf_err("failed to initialise model\n");
     return;
   }
@@ -463,7 +471,10 @@
   arm::app::HelloWorldModel model;
 
   /* Load the model */
-  if (!model.Init()) {
+  if (!model.Init(arm::app::tensor_arena,
+                    sizeof(arm::app::tensor_arena),
+                    GetModelPointer(),
+                    GetModelLen())) {
     printf_err(\"failed to initialise model\\n\");
     return;
   }
diff --git a/scripts/py/templates/tflite.cc.template b/scripts/py/templates/tflite.cc.template
index 97bdec5..a06c14a 100644
--- a/scripts/py/templates/tflite.cc.template
+++ b/scripts/py/templates/tflite.cc.template
@@ -16,7 +16,11 @@
 #}
 {{common_template_header}}
 
-#include "Model.hpp"
+#include "BufAttributes.hpp"
+
+#include <cstddef>
+#include <cstdint>
+
 {% for header in additional_headers %}
 #include "{{header}}"
 {% endfor %}
diff --git a/source/application/api/common/CMakeLists.txt b/source/application/api/common/CMakeLists.txt
new file mode 100644
index 0000000..5078adc
--- /dev/null
+++ b/source/application/api/common/CMakeLists.txt
@@ -0,0 +1,59 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+#########################################################
+#  Common utility library used by use case libraries.   #
+#  NOTE: this library should not depend on HAL.         #
+#########################################################
+
+cmake_minimum_required(VERSION 3.15.6)
+
+set(COMMON_UC_UTILS_TARGET common_api)
+project(${COMMON_UC_UTILS_TARGET}
+    DESCRIPTION     "Common Utilities library"
+    LANGUAGES       CXX)
+
+# Create static library
+add_library(${COMMON_UC_UTILS_TARGET} STATIC)
+
+## Include directories - public
+target_include_directories(${COMMON_UC_UTILS_TARGET}
+    PUBLIC
+    include
+    ${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro/tools/make/downloads/flatbuffers/include)
+
+## Sources
+target_sources(${COMMON_UC_UTILS_TARGET}
+    PRIVATE
+    source/Classifier.cc
+    source/ImageUtils.cc
+    source/Mfcc.cc
+    source/Model.cc
+    source/TensorFlowLiteMicro.cc)
+
+# Link time library targets:
+target_link_libraries(${COMMON_UC_UTILS_TARGET}
+    PUBLIC
+    log                     # Logging functions
+    arm_math                # Math functions
+    tensorflow-lite-micro)  # TensorFlow Lite Micro library
+
+# Display status:
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${COMMON_UC_UTILS_TARGET})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/source/application/main/include/AudioUtils.hpp b/source/application/api/common/include/AudioUtils.hpp
similarity index 100%
rename from source/application/main/include/AudioUtils.hpp
rename to source/application/api/common/include/AudioUtils.hpp
diff --git a/source/application/main/include/BaseProcessing.hpp b/source/application/api/common/include/BaseProcessing.hpp
similarity index 98%
rename from source/application/main/include/BaseProcessing.hpp
rename to source/application/api/common/include/BaseProcessing.hpp
index c099db2..a54dd12 100644
--- a/source/application/main/include/BaseProcessing.hpp
+++ b/source/application/api/common/include/BaseProcessing.hpp
@@ -17,7 +17,7 @@
 #ifndef BASE_PROCESSING_HPP
 #define BASE_PROCESSING_HPP
 
-#include "Model.hpp"
+#include <cstddef>
 
 namespace arm {
 namespace app {
diff --git a/source/application/main/include/ClassificationResult.hpp b/source/application/api/common/include/ClassificationResult.hpp
similarity index 100%
rename from source/application/main/include/ClassificationResult.hpp
rename to source/application/api/common/include/ClassificationResult.hpp
diff --git a/source/application/main/include/Classifier.hpp b/source/application/api/common/include/Classifier.hpp
similarity index 100%
rename from source/application/main/include/Classifier.hpp
rename to source/application/api/common/include/Classifier.hpp
diff --git a/source/application/main/include/DataStructures.hpp b/source/application/api/common/include/DataStructures.hpp
similarity index 100%
rename from source/application/main/include/DataStructures.hpp
rename to source/application/api/common/include/DataStructures.hpp
diff --git a/source/application/main/include/ImageUtils.hpp b/source/application/api/common/include/ImageUtils.hpp
similarity index 100%
rename from source/application/main/include/ImageUtils.hpp
rename to source/application/api/common/include/ImageUtils.hpp
diff --git a/source/application/main/include/Mfcc.hpp b/source/application/api/common/include/Mfcc.hpp
similarity index 100%
rename from source/application/main/include/Mfcc.hpp
rename to source/application/api/common/include/Mfcc.hpp
diff --git a/source/application/tensorflow-lite-micro/include/Model.hpp b/source/application/api/common/include/Model.hpp
similarity index 76%
rename from source/application/tensorflow-lite-micro/include/Model.hpp
rename to source/application/api/common/include/Model.hpp
index 151b680..df1b259 100644
--- a/source/application/tensorflow-lite-micro/include/Model.hpp
+++ b/source/application/api/common/include/Model.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +18,6 @@
 #define MODEL_HPP
 
 #include "TensorFlowLiteMicro.hpp"
-#include "BufAttributes.hpp"
 
 #include <cstdint>
 
@@ -64,12 +63,20 @@
         void LogInterpreterInfo();
 
         /** @brief      Initialise the model class object.
+         *  @param[in]  tensorArenaAddr     Pointer to the tensor arena buffer.
+         *  @param[in]  tensorArenaSize     Size of the tensor arena buffer in bytes.
+         *  @param[in]  nnModelAddr         Pointer to the model.
+         *  @param[in]  nnModelSize         Size of the model in bytes, if known.
          *  @param[in]  allocator   Optional: a pre-initialised micro allocator pointer,
          *                          if available. If supplied, this allocator will be used
          *                          to create the interpreter instance.
          *  @return     true if initialisation succeeds, false otherwise.
         **/
-        bool Init(tflite::MicroAllocator* allocator = nullptr);
+        bool Init(uint8_t* tensorArenaAddr,
+                  uint32_t tensorArenaSize,
+                  uint8_t* nnModelAddr,
+                  uint32_t nnModelSize,
+                  tflite::MicroAllocator* allocator = nullptr);
 
         /**
          * @brief       Gets the allocator pointer for this instance.
@@ -102,12 +109,12 @@
         /** @brief      Gets the pointer to the NN model data array.
          *  @return     Pointer of uint8_t type.
          **/
-        virtual const uint8_t* ModelPointer() = 0;
+        const uint8_t* ModelPointer();
 
         /** @brief      Gets the model size.
          *  @return     size_t, size in bytes.
          **/
-        virtual size_t ModelSize() = 0;
+        uint32_t ModelSize();
 
         /**
          * @brief       Gets the op resolver for the model instance.
@@ -126,16 +133,17 @@
         size_t GetActivationBufferSize();
 
     private:
-        tflite::ErrorReporter*          m_pErrorReporter      = nullptr;      /* Pointer to the error reporter. */
-        const tflite::Model*            m_pModel              = nullptr;      /* Tflite model pointer. */
-        tflite::MicroInterpreter*       m_pInterpreter        = nullptr;      /* Tflite interpreter. */
-        tflite::MicroAllocator*         m_pAllocator          = nullptr;      /* Tflite micro allocator. */
-        bool                            m_inited              = false;        /* Indicates whether this object has been initialised. */
+        tflite::ErrorReporter*          m_pErrorReporter     = nullptr;     /* Pointer to the error reporter. */
+        const tflite::Model*            m_pModel             = nullptr;     /* Tflite model pointer. */
+        tflite::MicroInterpreter*       m_pInterpreter       = nullptr;     /* Tflite interpreter. */
+        tflite::MicroAllocator*         m_pAllocator         = nullptr;     /* Tflite micro allocator. */
+        bool                            m_inited             = false;       /* Indicates whether this object has been initialised. */
+        uint8_t*                        m_modelAddr          = nullptr;     /* Model address */
+        uint32_t                        m_modelSize          = 0;           /* Model size */
 
         std::vector<TfLiteTensor*>      m_input              = {};           /* Model's input tensor pointers. */
         std::vector<TfLiteTensor*>      m_output             = {};           /* Model's output tensor pointers. */
         TfLiteType                      m_type               = kTfLiteNoType;/* Model's data type. */
-
     };
 
 } /* namespace app */
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/api/common/include/TensorFlowLiteMicro.hpp
similarity index 100%
rename from source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
rename to source/application/api/common/include/TensorFlowLiteMicro.hpp
diff --git a/source/application/main/Classifier.cc b/source/application/api/common/source/Classifier.cc
similarity index 100%
rename from source/application/main/Classifier.cc
rename to source/application/api/common/source/Classifier.cc
diff --git a/source/application/main/ImageUtils.cc b/source/application/api/common/source/ImageUtils.cc
similarity index 100%
rename from source/application/main/ImageUtils.cc
rename to source/application/api/common/source/ImageUtils.cc
diff --git a/source/application/main/Mfcc.cc b/source/application/api/common/source/Mfcc.cc
similarity index 100%
rename from source/application/main/Mfcc.cc
rename to source/application/api/common/source/Mfcc.cc
diff --git a/source/application/tensorflow-lite-micro/Model.cc b/source/application/api/common/source/Model.cc
similarity index 89%
rename from source/application/tensorflow-lite-micro/Model.cc
rename to source/application/api/common/source/Model.cc
index 22a1a4d..f1ac91d 100644
--- a/source/application/tensorflow-lite-micro/Model.cc
+++ b/source/application/api/common/source/Model.cc
@@ -35,14 +35,19 @@
     this->m_pErrorReporter = tflite::GetMicroErrorReporter();
 }
 
-bool arm::app::Model::Init(tflite::MicroAllocator* allocator)
+bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
+                           uint32_t tensorArenaSize,
+                           uint8_t* nnModelAddr,
+                           uint32_t nnModelSize,
+                           tflite::MicroAllocator* allocator)
 {
     /* Following tf lite micro example:
      * Map the model into a usable data structure. This doesn't involve any
      * copying or parsing, it's a very lightweight operation. */
-    const uint8_t* model_addr = ModelPointer();
-    debug("loading model from @ 0x%p\n", model_addr);
-    this->m_pModel = ::tflite::GetModel(model_addr);
+    debug("loading model from @ 0x%p\n", nnModelAddr);
+    debug("model size: %" PRIu32 " bytes.\n", nnModelSize);
+
+    this->m_pModel = ::tflite::GetModel(nnModelAddr);
 
     if (this->m_pModel->version() != TFLITE_SCHEMA_VERSION) {
         this->m_pErrorReporter->Report(
@@ -52,6 +57,9 @@
         return false;
     }
 
+    this->m_modelAddr = nnModelAddr;
+    this->m_modelSize = nnModelSize;
+
     /* Pull in only the operation implementations we need.
      * This relies on a complete list of all the ops needed by this graph.
      * An easier approach is to just use the AllOpsResolver, but this will
@@ -62,27 +70,16 @@
     debug("loading op resolver\n");
 
     this->EnlistOperations();
-    
-#if !defined(ARM_NPU)
-    /* If it is not a NPU build check if the model contains a NPU operator */
-    bool contains_ethosu_operator = this->ContainsEthosUOperator();
-    if (contains_ethosu_operator)
-    {
-        printf_err("Ethos-U operator present in the model but this build does not include Ethos-U drivers\n");
-        return false;
-    }
-#endif /* ARM_NPU */
 
     /* Create allocator instance, if it doesn't exist */
     this->m_pAllocator = allocator;
     if (!this->m_pAllocator) {
         /* Create an allocator instance */
-        info("Creating allocator using tensor arena in %s\n",
-            ACTIVATION_BUF_SECTION_NAME);
+        info("Creating allocator using tensor arena at 0x%p\n", tensorArenaAddr);
 
         this->m_pAllocator = tflite::MicroAllocator::Create(
-                                        this->GetTensorArena(),
-                                        this->GetActivationBufferSize(),
+                                        tensorArenaAddr,
+                                        tensorArenaSize,
                                         this->m_pErrorReporter);
 
         if (!this->m_pAllocator) {
@@ -341,6 +338,8 @@
     }
 
     PrintTensorFlowVersion();
+    info("Model address: 0x%p\n", this->ModelPointer());
+    info("Model size:      %" PRIu32 " bytes.\n", this->ModelSize());
     info("Model info:\n");
     this->LogInterpreterInfo();
 
@@ -348,18 +347,13 @@
 
     return true;
 }
-namespace arm {
-namespace app {
-    static uint8_t  tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
-} /* namespace app */
-} /* namespace arm */
 
-size_t arm::app::Model::GetActivationBufferSize()
+const uint8_t* arm::app::Model::ModelPointer()
 {
-    return ACTIVATION_BUF_SZ;
+    return this->m_modelAddr;
 }
 
-uint8_t *arm::app::Model::GetTensorArena()
+uint32_t arm::app::Model::ModelSize()
 {
-    return tensor_arena;
-}
\ No newline at end of file
+    return this->m_modelSize;
+}
diff --git a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc b/source/application/api/common/source/TensorFlowLiteMicro.cc
similarity index 100%
rename from source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
rename to source/application/api/common/source/TensorFlowLiteMicro.cc
diff --git a/source/application/api/use_case/ad/CMakeLists.txt b/source/application/api/use_case/ad/CMakeLists.txt
new file mode 100644
index 0000000..224816f
--- /dev/null
+++ b/source/application/api/use_case/ad/CMakeLists.txt
@@ -0,0 +1,41 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#             ANOMALY DETECTION API library             #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(AD_API_TARGET ad_api)
+project(${AD_API_TARGET}
+        DESCRIPTION     "Anomaly detection use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${AD_API_TARGET} STATIC
+    src/AdModel.cc
+    src/AdProcessing.cc
+    src/AdMelSpectrogram.cc
+    src/MelSpectrogram.cc)
+
+target_include_directories(${AD_API_TARGET} PUBLIC include)
+
+target_link_libraries(${AD_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${AD_API_TARGET})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/source/use_case/ad/include/AdMelSpectrogram.hpp b/source/application/api/use_case/ad/include/AdMelSpectrogram.hpp
similarity index 100%
rename from source/use_case/ad/include/AdMelSpectrogram.hpp
rename to source/application/api/use_case/ad/include/AdMelSpectrogram.hpp
diff --git a/source/use_case/ad/include/AdModel.hpp b/source/application/api/use_case/ad/include/AdModel.hpp
similarity index 94%
rename from source/use_case/ad/include/AdModel.hpp
rename to source/application/api/use_case/ad/include/AdModel.hpp
index 2195a7c..0436a89 100644
--- a/source/use_case/ad/include/AdModel.hpp
+++ b/source/application/api/use_case/ad/include/AdModel.hpp
@@ -41,10 +41,6 @@
         /** @brief   Adds operations to the op resolver instance */
         bool EnlistOperations() override;
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
     private:
         /* Maximum number of individual operations that can be enlisted */
         static constexpr int ms_maxOpCnt = 6;
diff --git a/source/use_case/ad/include/AdProcessing.hpp b/source/application/api/use_case/ad/include/AdProcessing.hpp
similarity index 99%
rename from source/use_case/ad/include/AdProcessing.hpp
rename to source/application/api/use_case/ad/include/AdProcessing.hpp
index 9abf6f1..abee75e 100644
--- a/source/use_case/ad/include/AdProcessing.hpp
+++ b/source/application/api/use_case/ad/include/AdProcessing.hpp
@@ -18,6 +18,7 @@
 #define AD_PROCESSING_HPP
 
 #include "BaseProcessing.hpp"
+#include "TensorFlowLiteMicro.hpp"
 #include "AudioUtils.hpp"
 #include "AdMelSpectrogram.hpp"
 #include "log_macros.h"
diff --git a/source/use_case/ad/include/MelSpectrogram.hpp b/source/application/api/use_case/ad/include/MelSpectrogram.hpp
similarity index 100%
rename from source/use_case/ad/include/MelSpectrogram.hpp
rename to source/application/api/use_case/ad/include/MelSpectrogram.hpp
diff --git a/source/use_case/ad/src/AdMelSpectrogram.cc b/source/application/api/use_case/ad/src/AdMelSpectrogram.cc
similarity index 100%
rename from source/use_case/ad/src/AdMelSpectrogram.cc
rename to source/application/api/use_case/ad/src/AdMelSpectrogram.cc
diff --git a/source/use_case/ad/src/AdModel.cc b/source/application/api/use_case/ad/src/AdModel.cc
similarity index 83%
rename from source/use_case/ad/src/AdModel.cc
rename to source/application/api/use_case/ad/src/AdModel.cc
index a2ef260..961c260 100644
--- a/source/use_case/ad/src/AdModel.cc
+++ b/source/application/api/use_case/ad/src/AdModel.cc
@@ -30,7 +30,6 @@
     this->m_opResolver.AddRelu6();
     this->m_opResolver.AddReshape();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -38,17 +37,5 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
     return true;
 }
-
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::AdModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-extern size_t GetModelLen();
-size_t arm::app::AdModel::ModelSize()
-{
-    return GetModelLen();
-}
diff --git a/source/use_case/ad/src/AdProcessing.cc b/source/application/api/use_case/ad/src/AdProcessing.cc
similarity index 99%
rename from source/use_case/ad/src/AdProcessing.cc
rename to source/application/api/use_case/ad/src/AdProcessing.cc
index a33131c..fb26a83 100644
--- a/source/use_case/ad/src/AdProcessing.cc
+++ b/source/application/api/use_case/ad/src/AdProcessing.cc
@@ -35,6 +35,8 @@
        m_audioDataStride{m_numMelSpecVectorsInAudioStride * melSpectrogramFrameStride},
        m_melSpec{melSpectrogramFrameLen}
 {
+    UNUSED(this->m_melSpectrogramFrameStride);
+
     if (!inputTensor) {
         printf_err("Invalid input tensor provided to pre-process\n");
         return;
diff --git a/source/use_case/ad/src/MelSpectrogram.cc b/source/application/api/use_case/ad/src/MelSpectrogram.cc
similarity index 100%
rename from source/use_case/ad/src/MelSpectrogram.cc
rename to source/application/api/use_case/ad/src/MelSpectrogram.cc
diff --git a/source/application/api/use_case/asr/CMakeLists.txt b/source/application/api/use_case/asr/CMakeLists.txt
new file mode 100644
index 0000000..77e3d6a
--- /dev/null
+++ b/source/application/api/use_case/asr/CMakeLists.txt
@@ -0,0 +1,43 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#       AUTOMATIC SPEECH RECOGNITION API library        #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(ASR_API_TARGET asr_api)
+project(${ASR_API_TARGET}
+        DESCRIPTION     "Automatic speech recognition use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${ASR_API_TARGET} STATIC
+        src/Wav2LetterPreprocess.cc
+        src/Wav2LetterPostprocess.cc
+        src/Wav2LetterMfcc.cc
+        src/AsrClassifier.cc
+        src/OutputDecode.cc
+        src/Wav2LetterModel.cc)
+
+target_include_directories(${ASR_API_TARGET} PUBLIC include)
+
+target_link_libraries(${ASR_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${ASR_API_TARGET})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/source/use_case/asr/include/AsrClassifier.hpp b/source/application/api/use_case/asr/include/AsrClassifier.hpp
similarity index 100%
rename from source/use_case/asr/include/AsrClassifier.hpp
rename to source/application/api/use_case/asr/include/AsrClassifier.hpp
diff --git a/source/use_case/asr/include/AsrResult.hpp b/source/application/api/use_case/asr/include/AsrResult.hpp
similarity index 100%
rename from source/use_case/asr/include/AsrResult.hpp
rename to source/application/api/use_case/asr/include/AsrResult.hpp
diff --git a/source/use_case/asr/include/OutputDecode.hpp b/source/application/api/use_case/asr/include/OutputDecode.hpp
similarity index 100%
rename from source/use_case/asr/include/OutputDecode.hpp
rename to source/application/api/use_case/asr/include/OutputDecode.hpp
diff --git a/source/use_case/asr/include/Wav2LetterMfcc.hpp b/source/application/api/use_case/asr/include/Wav2LetterMfcc.hpp
similarity index 100%
rename from source/use_case/asr/include/Wav2LetterMfcc.hpp
rename to source/application/api/use_case/asr/include/Wav2LetterMfcc.hpp
diff --git a/source/use_case/kws_asr/include/Wav2LetterModel.hpp b/source/application/api/use_case/asr/include/Wav2LetterModel.hpp
similarity index 88%
rename from source/use_case/kws_asr/include/Wav2LetterModel.hpp
rename to source/application/api/use_case/asr/include/Wav2LetterModel.hpp
index 0e1adc5..a02eed1 100644
--- a/source/use_case/kws_asr/include/Wav2LetterModel.hpp
+++ b/source/application/api/use_case/asr/include/Wav2LetterModel.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,8 +14,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifndef KWS_ASR_WAV2LETTER_MODEL_HPP
-#define KWS_ASR_WAV2LETTER_MODEL_HPP
+#ifndef ASR_WAV2LETTER_MODEL_HPP
+#define ASR_WAV2LETTER_MODEL_HPP
 
 #include "Model.hpp"
 
@@ -53,10 +53,6 @@
         /** @brief   Adds operations to the op resolver instance. */
         bool EnlistOperations() override;
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
     private:
         /* Maximum number of individual operations that can be enlisted. */
         static constexpr int ms_maxOpCnt = 5;
@@ -68,4 +64,4 @@
 } /* namespace app */
 } /* namespace arm */
 
-#endif /* KWS_ASR_WAV2LETTER_MODEL_HPP */
+#endif /* ASR_WAV2LETTER_MODEL_HPP */
diff --git a/source/use_case/asr/include/Wav2LetterPostprocess.hpp b/source/application/api/use_case/asr/include/Wav2LetterPostprocess.hpp
similarity index 99%
rename from source/use_case/asr/include/Wav2LetterPostprocess.hpp
rename to source/application/api/use_case/asr/include/Wav2LetterPostprocess.hpp
index 446014d..02738bc 100644
--- a/source/use_case/asr/include/Wav2LetterPostprocess.hpp
+++ b/source/application/api/use_case/asr/include/Wav2LetterPostprocess.hpp
@@ -19,6 +19,7 @@
 
 #include "TensorFlowLiteMicro.hpp"   /* TensorFlow headers. */
 #include "BaseProcessing.hpp"
+#include "Model.hpp"
 #include "AsrClassifier.hpp"
 #include "AsrResult.hpp"
 #include "log_macros.h"
diff --git a/source/use_case/asr/include/Wav2LetterPreprocess.hpp b/source/application/api/use_case/asr/include/Wav2LetterPreprocess.hpp
similarity index 99%
rename from source/use_case/asr/include/Wav2LetterPreprocess.hpp
rename to source/application/api/use_case/asr/include/Wav2LetterPreprocess.hpp
index dc9a415..9943946 100644
--- a/source/use_case/asr/include/Wav2LetterPreprocess.hpp
+++ b/source/application/api/use_case/asr/include/Wav2LetterPreprocess.hpp
@@ -17,7 +17,7 @@
 #ifndef ASR_WAV2LETTER_PREPROCESS_HPP
 #define ASR_WAV2LETTER_PREPROCESS_HPP
 
-#include "Wav2LetterModel.hpp"
+#include "TensorFlowLiteMicro.hpp"
 #include "Wav2LetterMfcc.hpp"
 #include "AudioUtils.hpp"
 #include "DataStructures.hpp"
diff --git a/source/use_case/asr/src/AsrClassifier.cc b/source/application/api/use_case/asr/src/AsrClassifier.cc
similarity index 100%
rename from source/use_case/asr/src/AsrClassifier.cc
rename to source/application/api/use_case/asr/src/AsrClassifier.cc
diff --git a/source/use_case/asr/src/OutputDecode.cc b/source/application/api/use_case/asr/src/OutputDecode.cc
similarity index 100%
rename from source/use_case/asr/src/OutputDecode.cc
rename to source/application/api/use_case/asr/src/OutputDecode.cc
diff --git a/source/use_case/asr/src/Wav2LetterMfcc.cc b/source/application/api/use_case/asr/src/Wav2LetterMfcc.cc
similarity index 100%
rename from source/use_case/asr/src/Wav2LetterMfcc.cc
rename to source/application/api/use_case/asr/src/Wav2LetterMfcc.cc
diff --git a/source/use_case/asr/src/Wav2LetterModel.cc b/source/application/api/use_case/asr/src/Wav2LetterModel.cc
similarity index 82%
rename from source/use_case/asr/src/Wav2LetterModel.cc
rename to source/application/api/use_case/asr/src/Wav2LetterModel.cc
index 8b38f4f..7b1e521 100644
--- a/source/use_case/asr/src/Wav2LetterModel.cc
+++ b/source/application/api/use_case/asr/src/Wav2LetterModel.cc
@@ -31,7 +31,6 @@
     this->m_opResolver.AddLeakyRelu();
     this->m_opResolver.AddSoftmax();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -39,19 +38,5 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
-
     return true;
 }
-
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::Wav2LetterModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-
-extern size_t GetModelLen();
-size_t arm::app::Wav2LetterModel::ModelSize()
-{
-    return GetModelLen();
-}
\ No newline at end of file
diff --git a/source/use_case/asr/src/Wav2LetterPostprocess.cc b/source/application/api/use_case/asr/src/Wav2LetterPostprocess.cc
similarity index 98%
rename from source/use_case/asr/src/Wav2LetterPostprocess.cc
rename to source/application/api/use_case/asr/src/Wav2LetterPostprocess.cc
index 42f434e..00e689b 100644
--- a/source/use_case/asr/src/Wav2LetterPostprocess.cc
+++ b/source/application/api/use_case/asr/src/Wav2LetterPostprocess.cc
@@ -93,7 +93,7 @@
 
         if (static_cast<int>(this->m_totalLen) !=
                              tensor->dims->data[axisIdx]) {
-            printf_err("Unexpected tensor dimension for axis %d, got %d, \n",
+            printf_err("Unexpected tensor dimension for axis %" PRIu32", got %d.\n",
                 axisIdx, tensor->dims->data[axisIdx]);
             return false;
         }
@@ -211,4 +211,4 @@
     }
 
 } /* namespace app */
-} /* namespace arm */
\ No newline at end of file
+} /* namespace arm */
diff --git a/source/use_case/asr/src/Wav2LetterPreprocess.cc b/source/application/api/use_case/asr/src/Wav2LetterPreprocess.cc
similarity index 100%
rename from source/use_case/asr/src/Wav2LetterPreprocess.cc
rename to source/application/api/use_case/asr/src/Wav2LetterPreprocess.cc
diff --git a/source/application/api/use_case/img_class/CMakeLists.txt b/source/application/api/use_case/img_class/CMakeLists.txt
new file mode 100644
index 0000000..f4818d8
--- /dev/null
+++ b/source/application/api/use_case/img_class/CMakeLists.txt
@@ -0,0 +1,39 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#                IMG CLASS API library                  #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(IMG_CLASS_API_TARGET img_class_api)
+project(${IMG_CLASS_API_TARGET}
+        DESCRIPTION     "Image classification use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${IMG_CLASS_API_TARGET} STATIC
+    src/ImgClassProcessing.cc
+    src/MobileNetModel.cc)
+
+target_include_directories(${IMG_CLASS_API_TARGET} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+target_link_libraries(${IMG_CLASS_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : ${IMG_CLASS_API_TARGET}")
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : ${CMAKE_SYSTEM_PROCESSOR}")
+message(STATUS "*******************************************************")
diff --git a/source/use_case/img_class/include/ImgClassProcessing.hpp b/source/application/api/use_case/img_class/include/ImgClassProcessing.hpp
similarity index 99%
rename from source/use_case/img_class/include/ImgClassProcessing.hpp
rename to source/application/api/use_case/img_class/include/ImgClassProcessing.hpp
index e931b7d..55b5ce1 100644
--- a/source/use_case/img_class/include/ImgClassProcessing.hpp
+++ b/source/application/api/use_case/img_class/include/ImgClassProcessing.hpp
@@ -18,7 +18,6 @@
 #define IMG_CLASS_PROCESSING_HPP
 
 #include "BaseProcessing.hpp"
-#include "Model.hpp"
 #include "Classifier.hpp"
 
 namespace arm {
diff --git a/source/use_case/img_class/include/MobileNetModel.hpp b/source/application/api/use_case/img_class/include/MobileNetModel.hpp
similarity index 94%
rename from source/use_case/img_class/include/MobileNetModel.hpp
rename to source/application/api/use_case/img_class/include/MobileNetModel.hpp
index 503f1ac..adaa9c2 100644
--- a/source/use_case/img_class/include/MobileNetModel.hpp
+++ b/source/application/api/use_case/img_class/include/MobileNetModel.hpp
@@ -37,10 +37,6 @@
         /** @brief   Adds operations to the op resolver instance. */
         bool EnlistOperations() override;
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
     private:
         /* Maximum number of individual operations that can be enlisted. */
         static constexpr int ms_maxOpCnt = 7;
@@ -52,4 +48,4 @@
 } /* namespace app */
 } /* namespace arm */
 
-#endif /* IMG_CLASS_MOBILENETMODEL_HPP */
\ No newline at end of file
+#endif /* IMG_CLASS_MOBILENETMODEL_HPP */
diff --git a/source/use_case/img_class/src/ImgClassProcessing.cc b/source/application/api/use_case/img_class/src/ImgClassProcessing.cc
similarity index 99%
rename from source/use_case/img_class/src/ImgClassProcessing.cc
rename to source/application/api/use_case/img_class/src/ImgClassProcessing.cc
index adf9794..491e751 100644
--- a/source/use_case/img_class/src/ImgClassProcessing.cc
+++ b/source/application/api/use_case/img_class/src/ImgClassProcessing.cc
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 #include "ImgClassProcessing.hpp"
+
 #include "ImageUtils.hpp"
 #include "log_macros.h"
 
diff --git a/source/use_case/img_class/src/MobileNetModel.cc b/source/application/api/use_case/img_class/src/MobileNetModel.cc
similarity index 83%
rename from source/use_case/img_class/src/MobileNetModel.cc
rename to source/application/api/use_case/img_class/src/MobileNetModel.cc
index 2e48f3b..b700d70 100644
--- a/source/use_case/img_class/src/MobileNetModel.cc
+++ b/source/application/api/use_case/img_class/src/MobileNetModel.cc
@@ -31,7 +31,6 @@
     this->m_opResolver.AddReshape();
     this->m_opResolver.AddSoftmax();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -39,18 +38,5 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
     return true;
 }
-
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::MobileNetModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-
-extern size_t GetModelLen();
-size_t arm::app::MobileNetModel::ModelSize()
-{
-    return GetModelLen();
-}
\ No newline at end of file
diff --git a/source/application/api/use_case/inference_runner/CMakeLists.txt b/source/application/api/use_case/inference_runner/CMakeLists.txt
new file mode 100644
index 0000000..d0fe629
--- /dev/null
+++ b/source/application/api/use_case/inference_runner/CMakeLists.txt
@@ -0,0 +1,37 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#              INFERENCE RUNNER API library             #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(INFERENCE_RUNNER_API_TARGET inference_runner_api)
+project(${INFERENCE_RUNNER_API_TARGET}
+        DESCRIPTION     "Inference runner use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${INFERENCE_RUNNER_API_TARGET} STATIC src/TestModel.cc)
+
+target_include_directories(${INFERENCE_RUNNER_API_TARGET} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+target_link_libraries(${INFERENCE_RUNNER_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : ${INFERENCE_RUNNER_API_TARGET}")
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : ${CMAKE_SYSTEM_PROCESSOR}")
+message(STATUS "*******************************************************")
diff --git a/source/use_case/inference_runner/include/TestModel.hpp b/source/application/api/use_case/inference_runner/include/TestModel.hpp
similarity index 93%
rename from source/use_case/inference_runner/include/TestModel.hpp
rename to source/application/api/use_case/inference_runner/include/TestModel.hpp
index 0846bd4..648198c 100644
--- a/source/use_case/inference_runner/include/TestModel.hpp
+++ b/source/application/api/use_case/inference_runner/include/TestModel.hpp
@@ -31,10 +31,6 @@
         /** @brief   Adds operations to the op resolver instance, not needed as using AllOpsResolver. */
         bool EnlistOperations() override {return false;}
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
     private:
 
         /* No need to define individual ops at the cost of extra memory. */
@@ -44,4 +40,4 @@
 } /* namespace app */
 } /* namespace arm */
 
-#endif /* INF_RUNNER_TESTMODEL_HPP */
\ No newline at end of file
+#endif /* INF_RUNNER_TESTMODEL_HPP */
diff --git a/source/application/api/use_case/inference_runner/src/TestModel.cc b/source/application/api/use_case/inference_runner/src/TestModel.cc
new file mode 100644
index 0000000..1891e44
--- /dev/null
+++ b/source/application/api/use_case/inference_runner/src/TestModel.cc
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "TestModel.hpp"
+#include "log_macros.h"
+
+const tflite::AllOpsResolver& arm::app::TestModel::GetOpResolver()
+{
+    return this->m_opResolver;
+}
diff --git a/source/application/api/use_case/kws/CMakeLists.txt b/source/application/api/use_case/kws/CMakeLists.txt
new file mode 100644
index 0000000..3256d03
--- /dev/null
+++ b/source/application/api/use_case/kws/CMakeLists.txt
@@ -0,0 +1,39 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#            KEYWORD SPOTTING API library               #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(KWS_API_TARGET kws_api)
+project(${KWS_API_TARGET}
+        DESCRIPTION     "Keyword spotting use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${KWS_API_TARGET} STATIC
+    src/KwsProcessing.cc
+    src/MicroNetKwsModel.cc)
+
+target_include_directories(${KWS_API_TARGET} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+target_link_libraries(${KWS_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : ${KWS_API_TARGET}")
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : ${CMAKE_SYSTEM_PROCESSOR}")
+message(STATUS "*******************************************************")
diff --git a/source/use_case/kws/include/KwsProcessing.hpp b/source/application/api/use_case/kws/include/KwsProcessing.hpp
similarity index 98%
rename from source/use_case/kws/include/KwsProcessing.hpp
rename to source/application/api/use_case/kws/include/KwsProcessing.hpp
index d3de3b3..0ede425 100644
--- a/source/use_case/kws/include/KwsProcessing.hpp
+++ b/source/application/api/use_case/kws/include/KwsProcessing.hpp
@@ -17,9 +17,8 @@
 #ifndef KWS_PROCESSING_HPP
 #define KWS_PROCESSING_HPP
 
-#include <AudioUtils.hpp>
+#include "AudioUtils.hpp"
 #include "BaseProcessing.hpp"
-#include "Model.hpp"
 #include "Classifier.hpp"
 #include "MicroNetKwsMfcc.hpp"
 
diff --git a/source/use_case/kws/include/KwsResult.hpp b/source/application/api/use_case/kws/include/KwsResult.hpp
similarity index 100%
rename from source/use_case/kws/include/KwsResult.hpp
rename to source/application/api/use_case/kws/include/KwsResult.hpp
diff --git a/source/use_case/kws/include/MicroNetKwsMfcc.hpp b/source/application/api/use_case/kws/include/MicroNetKwsMfcc.hpp
similarity index 100%
rename from source/use_case/kws/include/MicroNetKwsMfcc.hpp
rename to source/application/api/use_case/kws/include/MicroNetKwsMfcc.hpp
diff --git a/source/use_case/kws_asr/include/MicroNetKwsModel.hpp b/source/application/api/use_case/kws/include/MicroNetKwsModel.hpp
similarity index 90%
rename from source/use_case/kws_asr/include/MicroNetKwsModel.hpp
rename to source/application/api/use_case/kws/include/MicroNetKwsModel.hpp
index 22cf916..3d2f3de 100644
--- a/source/use_case/kws_asr/include/MicroNetKwsModel.hpp
+++ b/source/application/api/use_case/kws/include/MicroNetKwsModel.hpp
@@ -14,8 +14,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifndef KWS_ASR_MICRONETMODEL_HPP
-#define KWS_ASR_MICRONETMODEL_HPP
+#ifndef KWS_MICRONETMODEL_HPP
+#define KWS_MICRONETMODEL_HPP
 
 #include "Model.hpp"
 
@@ -33,6 +33,7 @@
 
 namespace arm {
 namespace app {
+
     class MicroNetKwsModel : public Model {
     public:
         /* Indices for the expected model - based on input and output tensor shapes */
@@ -48,10 +49,6 @@
         /** @brief   Adds operations to the op resolver instance. */
         bool EnlistOperations() override;
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
     private:
         /* Maximum number of individual operations that can be enlisted. */
         static constexpr int ms_maxOpCnt = 7;
@@ -63,4 +60,4 @@
 } /* namespace app */
 } /* namespace arm */
 
-#endif /* KWS_ASR_MICRONETMODEL_HPP */
+#endif /* KWS_MICRONETMODEL_HPP */
diff --git a/source/use_case/kws/src/KwsProcessing.cc b/source/application/api/use_case/kws/src/KwsProcessing.cc
similarity index 99%
rename from source/use_case/kws/src/KwsProcessing.cc
rename to source/application/api/use_case/kws/src/KwsProcessing.cc
index 328709d..40de498 100644
--- a/source/use_case/kws/src/KwsProcessing.cc
+++ b/source/application/api/use_case/kws/src/KwsProcessing.cc
@@ -15,7 +15,6 @@
  * limitations under the License.
  */
 #include "KwsProcessing.hpp"
-#include "ImageUtils.hpp"
 #include "log_macros.h"
 #include "MicroNetKwsModel.hpp"
 
diff --git a/source/use_case/kws/src/MicroNetKwsModel.cc b/source/application/api/use_case/kws/src/MicroNetKwsModel.cc
similarity index 83%
rename from source/use_case/kws/src/MicroNetKwsModel.cc
rename to source/application/api/use_case/kws/src/MicroNetKwsModel.cc
index 1c38525..bedca99 100644
--- a/source/use_case/kws/src/MicroNetKwsModel.cc
+++ b/source/application/api/use_case/kws/src/MicroNetKwsModel.cc
@@ -31,7 +31,6 @@
     this->m_opResolver.AddFullyConnected();
     this->m_opResolver.AddRelu();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -39,18 +38,5 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
     return true;
 }
-
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::MicroNetKwsModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-
-extern size_t GetModelLen();
-size_t arm::app::MicroNetKwsModel::ModelSize()
-{
-    return GetModelLen();
-}
\ No newline at end of file
diff --git a/source/application/api/use_case/noise_reduction/CMakeLists.txt b/source/application/api/use_case/noise_reduction/CMakeLists.txt
new file mode 100644
index 0000000..5fa9a73
--- /dev/null
+++ b/source/application/api/use_case/noise_reduction/CMakeLists.txt
@@ -0,0 +1,40 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#            NOISE REDUCTION API library                #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(NOISE_REDUCTION_API_TARGET noise_reduction_api)
+project(${NOISE_REDUCTION_API_TARGET}
+        DESCRIPTION     "Noise reduction use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${NOISE_REDUCTION_API_TARGET} STATIC
+        src/RNNoiseProcessing.cc
+        src/RNNoiseFeatureProcessor.cc
+        src/RNNoiseModel.cc)
+
+target_include_directories(${NOISE_REDUCTION_API_TARGET} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+target_link_libraries(${NOISE_REDUCTION_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : ${NOISE_REDUCTION_API_TARGET}")
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : ${CMAKE_SYSTEM_PROCESSOR}")
+message(STATUS "*******************************************************")
diff --git a/source/use_case/noise_reduction/include/RNNoiseFeatureProcessor.hpp b/source/application/api/use_case/noise_reduction/include/RNNoiseFeatureProcessor.hpp
similarity index 100%
rename from source/use_case/noise_reduction/include/RNNoiseFeatureProcessor.hpp
rename to source/application/api/use_case/noise_reduction/include/RNNoiseFeatureProcessor.hpp
diff --git a/source/use_case/noise_reduction/include/RNNoiseModel.hpp b/source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp
similarity index 96%
rename from source/use_case/noise_reduction/include/RNNoiseModel.hpp
rename to source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp
index f6e4510..3d2f23c 100644
--- a/source/use_case/noise_reduction/include/RNNoiseModel.hpp
+++ b/source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp
@@ -59,10 +59,6 @@
         /** @brief   Adds operations to the op resolver instance. */
         bool EnlistOperations() override;
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
         /*
         Each inference after the first needs to copy 3 GRU states from a output index to input index (model dependent):
         0 -> 3, 2 -> 2, 3 -> 1
@@ -79,4 +75,4 @@
 } /* namespace app */
 } /* namespace arm */
 
-#endif /* RNNOISE_MODEL_HPP */
\ No newline at end of file
+#endif /* RNNOISE_MODEL_HPP */
diff --git a/source/use_case/noise_reduction/include/RNNoiseProcessing.hpp b/source/application/api/use_case/noise_reduction/include/RNNoiseProcessing.hpp
similarity index 100%
rename from source/use_case/noise_reduction/include/RNNoiseProcessing.hpp
rename to source/application/api/use_case/noise_reduction/include/RNNoiseProcessing.hpp
diff --git a/source/use_case/noise_reduction/src/RNNoiseFeatureProcessor.cc b/source/application/api/use_case/noise_reduction/src/RNNoiseFeatureProcessor.cc
similarity index 100%
rename from source/use_case/noise_reduction/src/RNNoiseFeatureProcessor.cc
rename to source/application/api/use_case/noise_reduction/src/RNNoiseFeatureProcessor.cc
diff --git a/source/use_case/noise_reduction/src/RNNoiseModel.cc b/source/application/api/use_case/noise_reduction/src/RNNoiseModel.cc
similarity index 93%
rename from source/use_case/noise_reduction/src/RNNoiseModel.cc
rename to source/application/api/use_case/noise_reduction/src/RNNoiseModel.cc
index 244fa1a..457cda9 100644
--- a/source/use_case/noise_reduction/src/RNNoiseModel.cc
+++ b/source/application/api/use_case/noise_reduction/src/RNNoiseModel.cc
@@ -39,7 +39,6 @@
     this->m_opResolver.AddConcatenation();
     this->m_opResolver.AddRelu();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -47,22 +46,9 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
     return true;
 }
 
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::RNNoiseModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-
-extern size_t GetModelLen();
-size_t arm::app::RNNoiseModel::ModelSize()
-{
-    return GetModelLen();
-}
-
 bool arm::app::RNNoiseModel::RunInference()
 {
     return Model::RunInference();
@@ -107,4 +93,4 @@
         memcpy(inputGruState, outGruState, inputGruStateTensor->bytes);
     }
     return true;
-}
\ No newline at end of file
+}
diff --git a/source/use_case/noise_reduction/src/RNNoiseProcessing.cc b/source/application/api/use_case/noise_reduction/src/RNNoiseProcessing.cc
similarity index 100%
rename from source/use_case/noise_reduction/src/RNNoiseProcessing.cc
rename to source/application/api/use_case/noise_reduction/src/RNNoiseProcessing.cc
diff --git a/source/application/api/use_case/object_detection/CMakeLists.txt b/source/application/api/use_case/object_detection/CMakeLists.txt
new file mode 100644
index 0000000..797ff55
--- /dev/null
+++ b/source/application/api/use_case/object_detection/CMakeLists.txt
@@ -0,0 +1,40 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#             OBJECT DETECTION API library              #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(OBJECT_DETECTION_API_TARGET object_detection_api)
+project(${OBJECT_DETECTION_API_TARGET}
+        DESCRIPTION     "Object detection use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${OBJECT_DETECTION_API_TARGET} STATIC
+        src/DetectorPreProcessing.cc
+        src/DetectorPostProcessing.cc
+        src/YoloFastestModel.cc)
+
+target_include_directories(${OBJECT_DETECTION_API_TARGET} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+target_link_libraries(${OBJECT_DETECTION_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : ${OBJECT_DETECTION_API_TARGET}")
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : ${CMAKE_SYSTEM_PROCESSOR}")
+message(STATUS "*******************************************************")
diff --git a/source/use_case/object_detection/include/DetectionResult.hpp b/source/application/api/use_case/object_detection/include/DetectionResult.hpp
similarity index 100%
rename from source/use_case/object_detection/include/DetectionResult.hpp
rename to source/application/api/use_case/object_detection/include/DetectionResult.hpp
diff --git a/source/use_case/object_detection/include/DetectorPostProcessing.hpp b/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
similarity index 99%
rename from source/use_case/object_detection/include/DetectorPostProcessing.hpp
rename to source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
index b3ddb2c..30bc123 100644
--- a/source/use_case/object_detection/include/DetectorPostProcessing.hpp
+++ b/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
@@ -17,7 +17,6 @@
 #ifndef DETECTOR_POST_PROCESSING_HPP
 #define DETECTOR_POST_PROCESSING_HPP
 
-#include "UseCaseCommonUtils.hpp"
 #include "ImageUtils.hpp"
 #include "DetectionResult.hpp"
 #include "YoloFastestModel.hpp"
diff --git a/source/use_case/object_detection/include/DetectorPreProcessing.hpp b/source/application/api/use_case/object_detection/include/DetectorPreProcessing.hpp
similarity index 100%
rename from source/use_case/object_detection/include/DetectorPreProcessing.hpp
rename to source/application/api/use_case/object_detection/include/DetectorPreProcessing.hpp
diff --git a/source/use_case/object_detection/include/YoloFastestModel.hpp b/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
similarity index 95%
rename from source/use_case/object_detection/include/YoloFastestModel.hpp
rename to source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
index 2986a58..4c64433 100644
--- a/source/use_case/object_detection/include/YoloFastestModel.hpp
+++ b/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
@@ -42,10 +42,6 @@
         /** @brief   Adds operations to the op resolver instance. */
         bool EnlistOperations() override;
 
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
     private:
         /* Maximum number of individual operations that can be enlisted. */
         static constexpr int ms_maxOpCnt = 8;
diff --git a/source/use_case/object_detection/src/DetectorPostProcessing.cc b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
similarity index 100%
rename from source/use_case/object_detection/src/DetectorPostProcessing.cc
rename to source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
diff --git a/source/use_case/object_detection/src/DetectorPreProcessing.cc b/source/application/api/use_case/object_detection/src/DetectorPreProcessing.cc
similarity index 100%
rename from source/use_case/object_detection/src/DetectorPreProcessing.cc
rename to source/application/api/use_case/object_detection/src/DetectorPreProcessing.cc
diff --git a/source/use_case/object_detection/src/YoloFastestModel.cc b/source/application/api/use_case/object_detection/src/YoloFastestModel.cc
similarity index 84%
rename from source/use_case/object_detection/src/YoloFastestModel.cc
rename to source/application/api/use_case/object_detection/src/YoloFastestModel.cc
index b1fd776..e293181 100644
--- a/source/use_case/object_detection/src/YoloFastestModel.cc
+++ b/source/application/api/use_case/object_detection/src/YoloFastestModel.cc
@@ -34,7 +34,6 @@
     this->m_opResolver.AddMaxPool2D();
     this->m_opResolver.AddConcatenation();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -42,18 +41,5 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
     return true;
 }
-
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::YoloFastestModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-
-extern size_t GetModelLen();
-size_t arm::app::YoloFastestModel::ModelSize()
-{
-    return GetModelLen();
-}
diff --git a/source/application/api/use_case/vww/CMakeLists.txt b/source/application/api/use_case/vww/CMakeLists.txt
new file mode 100644
index 0000000..b933d32
--- /dev/null
+++ b/source/application/api/use_case/vww/CMakeLists.txt
@@ -0,0 +1,39 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+#             VISUAL WAKE WORD API library              #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(VWW_API_TARGET vww_api)
+project(${VWW_API_TARGET}
+        DESCRIPTION     "Visual wake word use case API library"
+        LANGUAGES       C CXX)
+
+# Create static library
+add_library(${VWW_API_TARGET} STATIC
+        src/VisualWakeWordProcessing.cc
+        src/VisualWakeWordModel.cc)
+
+target_include_directories(${VWW_API_TARGET} PUBLIC include)
+
+target_link_libraries(${VWW_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${VWW_API_TARGET})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/source/use_case/vww/include/VisualWakeWordModel.hpp b/source/application/api/use_case/vww/include/VisualWakeWordModel.hpp
similarity index 94%
rename from source/use_case/vww/include/VisualWakeWordModel.hpp
rename to source/application/api/use_case/vww/include/VisualWakeWordModel.hpp
index 1ed9202..a34b904 100644
--- a/source/use_case/vww/include/VisualWakeWordModel.hpp
+++ b/source/application/api/use_case/vww/include/VisualWakeWordModel.hpp
@@ -36,10 +36,6 @@
 
         /** @brief   Adds operations to the op resolver instance. */
         bool EnlistOperations() override;
-
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
     private:
         /* Maximum number of individual operations that can be enlisted. */
         static constexpr int ms_maxOpCnt = 7;
diff --git a/source/use_case/vww/include/VisualWakeWordProcessing.hpp b/source/application/api/use_case/vww/include/VisualWakeWordProcessing.hpp
similarity index 100%
rename from source/use_case/vww/include/VisualWakeWordProcessing.hpp
rename to source/application/api/use_case/vww/include/VisualWakeWordProcessing.hpp
diff --git a/source/use_case/vww/src/VisualWakeWordModel.cc b/source/application/api/use_case/vww/src/VisualWakeWordModel.cc
similarity index 82%
rename from source/use_case/vww/src/VisualWakeWordModel.cc
rename to source/application/api/use_case/vww/src/VisualWakeWordModel.cc
index 59beccc..2d8a125 100644
--- a/source/use_case/vww/src/VisualWakeWordModel.cc
+++ b/source/application/api/use_case/vww/src/VisualWakeWordModel.cc
@@ -31,7 +31,6 @@
     this->m_opResolver.AddPad();
     this->m_opResolver.AddAdd();
 
-#if defined(ARM_NPU)
     if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
@@ -39,18 +38,5 @@
         printf_err("Failed to add Arm NPU support to op resolver.");
         return false;
     }
-#endif /* ARM_NPU */
     return true;
 }
-
-extern uint8_t* GetModelPointer();
-const uint8_t* arm::app::VisualWakeWordModel::ModelPointer()
-{
-    return GetModelPointer();
-}
-
-extern size_t GetModelLen();
-size_t arm::app::VisualWakeWordModel::ModelSize()
-{
-    return GetModelLen();
-}
\ No newline at end of file
diff --git a/source/use_case/vww/src/VisualWakeWordProcessing.cc b/source/application/api/use_case/vww/src/VisualWakeWordProcessing.cc
similarity index 100%
rename from source/use_case/vww/src/VisualWakeWordProcessing.cc
rename to source/application/api/use_case/vww/src/VisualWakeWordProcessing.cc
diff --git a/source/application/tensorflow-lite-micro/include/BufAttributes.hpp b/source/application/main/include/BufAttributes.hpp
similarity index 100%
rename from source/application/tensorflow-lite-micro/include/BufAttributes.hpp
rename to source/application/main/include/BufAttributes.hpp
diff --git a/source/application/main/include/UseCaseCommonUtils.hpp b/source/application/main/include/UseCaseCommonUtils.hpp
index 9b6d550..b0f2e7a 100644
--- a/source/application/main/include/UseCaseCommonUtils.hpp
+++ b/source/application/main/include/UseCaseCommonUtils.hpp
@@ -24,6 +24,7 @@
 #include "UseCaseHandler.hpp"       /* Handlers for different user options. */
 #include "Classifier.hpp"           /* Classifier. */
 #include "InputFiles.hpp"
+#include "BufAttributes.hpp"        /* Buffer attributes */
 
 
 void DisplayCommonMenu();
diff --git a/source/hal/source/platform/mps3/CMakeLists.txt b/source/hal/source/platform/mps3/CMakeLists.txt
index 2c03dee..46da2fa 100644
--- a/source/hal/source/platform/mps3/CMakeLists.txt
+++ b/source/hal/source/platform/mps3/CMakeLists.txt
@@ -41,12 +41,22 @@
     set(ETHOS_U_SEC_ENABLED  "1"            CACHE STRING "Ethos-U NPU Security enable")
     set(ETHOS_U_PRIV_ENABLED "1"            CACHE STRING "Ethos-U NPU Privilege enable")
 
+    set(DYNAMIC_MODEL_BASE   "0x90000000"   CACHE STRING "Region to be used for dynamic load of model into memory")
+    set(DYNAMIC_MODEL_SIZE   "0x02000000"   CACHE STRING "Size of the space reserved for the model")
+
     if (ETHOS_U_NPU_TIMING_ADAPTER_ENABLED)
         set(TA0_BASE         "0x58103000"   CACHE STRING "Ethos-U NPU timing adapter 0")
         set(TA1_BASE         "0x58103200"   CACHE STRING "Ethos-U NPU timing adapter 1")
     endif()
 endif()
 
+math(EXPR IFM_BASE      "${DYNAMIC_MODEL_BASE} + ${DYNAMIC_MODEL_SIZE}" OUTPUT_FORMAT HEXADECIMAL)
+set(DYNAMIC_IFM_BASE    "${IFM_BASE}" CACHE STRING "Base address for IFMs to be loaded")
+set(DYNAMIC_IFM_SIZE    "0x01000000" CACHE STRING "Size of the space reserved for the IFM")
+math(EXPR OFM_BASE      "${DYNAMIC_IFM_BASE} + ${DYNAMIC_IFM_SIZE}" OUTPUT_FORMAT HEXADECIMAL)
+set(DYNAMIC_OFM_BASE    "${OFM_BASE}" CACHE STRING "Base address for OFMs to be dumped to")
+set(DYNAMIC_OFM_SIZE    "0x01000000" CACHE STRING "Size of the space reserved for the OFM")
+
 # 2. Create static library
 add_library(${PLATFORM_DRIVERS_TARGET} STATIC)
 
diff --git a/source/use_case/ad/src/MainLoop.cc b/source/use_case/ad/src/MainLoop.cc
index 140359b..e9f7b4e 100644
--- a/source/use_case/ad/src/MainLoop.cc
+++ b/source/use_case/ad/src/MainLoop.cc
@@ -18,7 +18,17 @@
 #include "AdModel.hpp"              /* Model class for running inference */
 #include "UseCaseCommonUtils.hpp"   /* Utils functions */
 #include "UseCaseHandler.hpp"       /* Handlers for different user options */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
 
 enum opcodes
 {
@@ -49,12 +59,23 @@
     arm::app::AdModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init())
+    if (!model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()))
     {
         printf_err("failed to initialise model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
diff --git a/source/use_case/ad/usecase.cmake b/source/use_case/ad/usecase.cmake
index 23b4c32..06d7681 100644
--- a/source/use_case/ad/usecase.cmake
+++ b/source/use_case/ad/usecase.cmake
@@ -15,6 +15,9 @@
 #  limitations under the License.
 #----------------------------------------------------------------------------
 
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "ad")
+
 USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single input WAV file, to use in the evaluation application."
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
     PATH_OR_FILE)
diff --git a/source/use_case/asr/include/Wav2LetterModel.hpp b/source/use_case/asr/include/Wav2LetterModel.hpp
deleted file mode 100644
index bec70ab..0000000
--- a/source/use_case/asr/include/Wav2LetterModel.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ASR_WAV2LETTER_MODEL_HPP
-#define ASR_WAV2LETTER_MODEL_HPP
-
-#include "Model.hpp"
-
-extern const int g_FrameLength;
-extern const int g_FrameStride;
-extern const float g_ScoreThreshold;
-extern const int g_ctxLen;
-
-namespace arm {
-namespace app {
-
-    class Wav2LetterModel : public Model {
-
-    public:
-        /* Indices for the expected model - based on input and output tensor shapes */
-        static constexpr uint32_t ms_inputRowsIdx  = 1;
-        static constexpr uint32_t ms_inputColsIdx  = 2;
-        static constexpr uint32_t ms_outputRowsIdx = 2;
-        static constexpr uint32_t ms_outputColsIdx = 3;
-
-        /* Model specific constants. */
-        static constexpr uint32_t ms_blankTokenIdx   = 28;
-        static constexpr uint32_t ms_numMfccFeatures = 13;
-
-    protected:
-        /** @brief   Gets the reference to op resolver interface class. */
-        const tflite::MicroOpResolver& GetOpResolver() override;
-
-        /** @brief   Adds operations to the op resolver instance. */
-        bool EnlistOperations() override;
-
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
-    private:
-        /* Maximum number of individual operations that can be enlisted. */
-        static constexpr int ms_maxOpCnt = 5;
-
-        /* A mutable op resolver instance. */
-        tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
-    };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* ASR_WAV2LETTER_MODEL_HPP */
diff --git a/source/use_case/asr/src/MainLoop.cc b/source/use_case/asr/src/MainLoop.cc
index a1a9540..7acd319 100644
--- a/source/use_case/asr/src/MainLoop.cc
+++ b/source/use_case/asr/src/MainLoop.cc
@@ -20,7 +20,18 @@
 #include "UseCaseCommonUtils.hpp"    /* Utils functions. */
 #include "AsrClassifier.hpp"         /* Classifier. */
 #include "InputFiles.hpp"            /* Generated audio clip header. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+namespace app {
+namespace asr {
+    static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    extern uint8_t* GetModelPointer();
+    extern size_t GetModelLen();
+} /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
 
 enum opcodes
 {
@@ -53,7 +64,10 @@
     arm::app::Wav2LetterModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::asr::tensorArena,
+                    sizeof(arm::app::asr::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     } else if (!VerifyTensorDimensions(model)) {
@@ -61,6 +75,14 @@
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
     std::vector <std::string> labels;
@@ -71,10 +93,10 @@
     caseContext.Set<arm::app::Profiler&>("profiler", profiler);
     caseContext.Set<arm::app::Model&>("model", model);
     caseContext.Set<uint32_t>("clipIndex", 0);
-    caseContext.Set<uint32_t>("frameLength", g_FrameLength);
-    caseContext.Set<uint32_t>("frameStride", g_FrameStride);
-    caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);  /* Score threshold. */
-    caseContext.Set<uint32_t>("ctxLen", g_ctxLen);  /* Left and right context length (MFCC feat vectors). */
+    caseContext.Set<uint32_t>("frameLength", arm::app::asr::g_FrameLength);
+    caseContext.Set<uint32_t>("frameStride", arm::app::asr::g_FrameStride);
+    caseContext.Set<float>("scoreThreshold", arm::app::asr::g_ScoreThreshold);  /* Score threshold. */
+    caseContext.Set<uint32_t>("ctxLen", arm::app::asr::g_ctxLen);  /* Left and right context length (MFCC feat vectors). */
     caseContext.Set<const std::vector <std::string>&>("labels", labels);
     caseContext.Set<arm::app::AsrClassifier&>("classifier", classifier);
 
diff --git a/source/use_case/asr/usecase.cmake b/source/use_case/asr/usecase.cmake
index 50e7e26..2a2178b 100644
--- a/source/use_case/asr/usecase.cmake
+++ b/source/use_case/asr/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "asr")
 
 USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single WAV file, to use in the evaluation application."
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
@@ -98,4 +100,4 @@
     MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
     DESTINATION ${SRC_GEN_DIR}
     EXPRESSIONS ${EXTRA_MODEL_CODE}
-    )
+    NAMESPACE   "arm" "app" "asr")
diff --git a/source/use_case/img_class/src/MainLoop.cc b/source/use_case/img_class/src/MainLoop.cc
index d9fb925..de3779f 100644
--- a/source/use_case/img_class/src/MainLoop.cc
+++ b/source/use_case/img_class/src/MainLoop.cc
@@ -21,7 +21,16 @@
 #include "MobileNetModel.hpp"       /* Model class for running inference. */
 #include "UseCaseHandler.hpp"       /* Handlers for different user options. */
 #include "UseCaseCommonUtils.hpp"   /* Utils functions. */
-#include "log_macros.h"
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
 
 using ImgClassClassifier = arm::app::Classifier;
 
@@ -30,11 +39,22 @@
     arm::app::MobileNetModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
diff --git a/source/use_case/img_class/usecase.cmake b/source/use_case/img_class/usecase.cmake
index dafdbbf..2a8be09 100644
--- a/source/use_case/img_class/usecase.cmake
+++ b/source/use_case/img_class/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "img_class")
 
 USER_OPTION(${use_case}_FILE_PATH "Directory with custom image files to use, or path to a single image, in the evaluation application"
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
diff --git a/source/use_case/inference_runner/src/MainLoop.cc b/source/use_case/inference_runner/src/MainLoop.cc
index ddff40c..0991b7b 100644
--- a/source/use_case/inference_runner/src/MainLoop.cc
+++ b/source/use_case/inference_runner/src/MainLoop.cc
@@ -18,7 +18,37 @@
 #include "TestModel.hpp"            /* Model class for running inference. */
 #include "UseCaseHandler.hpp"       /* Handlers for different user options. */
 #include "UseCaseCommonUtils.hpp"   /* Utils functions. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+#if defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE)
+
+static uint8_t* GetModelPointer()
+{
+    info("Model pointer: 0x%08x\n", DYNAMIC_MODEL_BASE);
+    return reinterpret_cast<uint8_t *>(DYNAMIC_MODEL_BASE);
+}
+
+static size_t GetModelLen()
+{
+    /* TODO: Can we get the actual model size here somehow?
+     * Currently we return the reserved space. It is possible to do
+     * so by reading the memory pattern but it will not be reliable. */
+    return static_cast<size_t>(DYNAMIC_MODEL_SIZE);
+}
+
+#else /* defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE) */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
+#endif /* defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE) */
 
 enum opcodes
 {
@@ -31,11 +61,22 @@
     arm::app::TestModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
diff --git a/source/use_case/inference_runner/src/TestModel.cc b/source/use_case/inference_runner/src/TestModel.cc
deleted file mode 100644
index 3e72119..0000000
--- a/source/use_case/inference_runner/src/TestModel.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "TestModel.hpp"
-#include "log_macros.h"
-
-const tflite::AllOpsResolver& arm::app::TestModel::GetOpResolver()
-{
-    return this->m_opResolver;
-}
-
-#if defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE)
-
-    const uint8_t* arm::app::TestModel::ModelPointer()
-    {
-        info("Model pointer: 0x%08x\n", DYNAMIC_MODEL_BASE);
-        return reinterpret_cast<uint8_t *>(DYNAMIC_MODEL_BASE);
-    }
-
-    size_t arm::app::TestModel::ModelSize()
-    {
-        /* TODO: Can we get the actual model size here somehow?
-         * Currently we return the reserved space. It is possible to do
-         * so by reading the memory pattern but it will not be reliable. */
-        return static_cast<size_t>(DYNAMIC_MODEL_SIZE);
-    }
-
-#else /* defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE) */
-
-    extern uint8_t* GetModelPointer();
-    const uint8_t* arm::app::TestModel::ModelPointer()
-    {
-        return GetModelPointer();
-    }
-
-    extern size_t GetModelLen();
-    size_t arm::app::TestModel::ModelSize()
-    {
-        return GetModelLen();
-    }
-
-#endif /* defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE) */
diff --git a/source/use_case/inference_runner/usecase.cmake b/source/use_case/inference_runner/usecase.cmake
index 7d12120..c70be71 100644
--- a/source/use_case/inference_runner/usecase.cmake
+++ b/source/use_case/inference_runner/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "inference_runner")
 
 USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
     0x00200000
diff --git a/source/use_case/kws/include/MicroNetKwsModel.hpp b/source/use_case/kws/include/MicroNetKwsModel.hpp
deleted file mode 100644
index 3259c45..0000000
--- a/source/use_case/kws/include/MicroNetKwsModel.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_MICRONETMODEL_HPP
-#define KWS_MICRONETMODEL_HPP
-
-#include "Model.hpp"
-
-extern const int g_FrameLength;
-extern const int g_FrameStride;
-extern const float g_ScoreThreshold;
-
-namespace arm {
-namespace app {
-
-    class MicroNetKwsModel : public Model {
-    public:
-        /* Indices for the expected model - based on input and output tensor shapes */
-        static constexpr uint32_t ms_inputRowsIdx = 1;
-        static constexpr uint32_t ms_inputColsIdx = 2;
-        static constexpr uint32_t ms_outputRowsIdx = 2;
-        static constexpr uint32_t ms_outputColsIdx = 3;
-    
-    protected:
-        /** @brief   Gets the reference to op resolver interface class. */
-        const tflite::MicroOpResolver& GetOpResolver() override;
-
-        /** @brief   Adds operations to the op resolver instance. */
-        bool EnlistOperations() override;
-
-        const uint8_t* ModelPointer() override;
-
-        size_t ModelSize() override;
-
-    private:
-        /* Maximum number of individual operations that can be enlisted. */
-        static constexpr int ms_maxOpCnt = 7;
-
-        /* A mutable op resolver instance. */
-        tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
-    };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_MICRONETMODEL_HPP */
diff --git a/source/use_case/kws/src/MainLoop.cc b/source/use_case/kws/src/MainLoop.cc
index e590c4a..3c35a7f 100644
--- a/source/use_case/kws/src/MainLoop.cc
+++ b/source/use_case/kws/src/MainLoop.cc
@@ -21,7 +21,18 @@
 #include "Labels.hpp"               /* For label strings. */
 #include "UseCaseHandler.hpp"       /* Handlers for different user options. */
 #include "UseCaseCommonUtils.hpp"   /* Utils functions. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+namespace app {
+namespace kws {
+    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    extern uint8_t *GetModelPointer();
+    extern size_t GetModelLen();
+} /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
 
 using KwsClassifier = arm::app::Classifier;
 
@@ -53,11 +64,22 @@
     arm::app::MicroNetKwsModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::kws::tensorArena,
+                    sizeof(arm::app::kws::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
@@ -65,9 +87,9 @@
     caseContext.Set<arm::app::Profiler&>("profiler", profiler);
     caseContext.Set<arm::app::Model&>("model", model);
     caseContext.Set<uint32_t>("clipIndex", 0);
-    caseContext.Set<int>("frameLength", g_FrameLength);
-    caseContext.Set<int>("frameStride", g_FrameStride);
-    caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);  /* Normalised score threshold. */
+    caseContext.Set<int>("frameLength", arm::app::kws::g_FrameLength);
+    caseContext.Set<int>("frameStride", arm::app::kws::g_FrameStride);
+    caseContext.Set<float>("scoreThreshold", arm::app::kws::g_ScoreThreshold);  /* Normalised score threshold. */
 
     KwsClassifier classifier;  /* classifier wrapper object. */
     caseContext.Set<arm::app::Classifier&>("classifier", classifier);
@@ -114,4 +136,4 @@
         }
     } while (executionSuccessful && bUseMenu);
     info("Main loop terminated.\n");
-}
\ No newline at end of file
+}
diff --git a/source/use_case/kws/usecase.cmake b/source/use_case/kws/usecase.cmake
index 9f3736e..d9985c7 100644
--- a/source/use_case/kws/usecase.cmake
+++ b/source/use_case/kws/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "kws")
 
 USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single WAV file, to use in the evaluation application."
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
@@ -96,4 +98,5 @@
     MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
     DESTINATION ${SRC_GEN_DIR}
     EXPRESSIONS ${EXTRA_MODEL_CODE}
+    NAMESPACE   "arm" "app" "kws"
 )
diff --git a/source/use_case/kws_asr/include/AsrClassifier.hpp b/source/use_case/kws_asr/include/AsrClassifier.hpp
deleted file mode 100644
index 6ab9685..0000000
--- a/source/use_case/kws_asr/include/AsrClassifier.hpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ASR_CLASSIFIER_HPP
-#define ASR_CLASSIFIER_HPP
-
-#include "Classifier.hpp"
-
-namespace arm {
-namespace app {
-
-    class AsrClassifier : public Classifier {
-    public:
-        /**
-         * @brief       Gets the top N classification results from the
-         *              output vector.
-         * @param[in]   outputTensor   Inference output tensor from an NN model.
-         * @param[out]  vecResults     A vector of classification results
-         *                             populated by this function.
-         * @param[in]   labels         Labels vector to match classified classes
-         * @param[in]   topNCount      Number of top classifications to pick.
-         * @param[in]   use_softmax    Whether softmax scaling should be applied to model output.
-         * @return      true if successful, false otherwise.
-         **/
-        bool GetClassificationResults(
-                TfLiteTensor* outputTensor,
-                std::vector<ClassificationResult>& vecResults,
-                const std::vector <std::string>& labels, uint32_t topNCount,
-                bool use_softmax = false) override;
-
-    private:
-
-        /**
-         * @brief       Utility function that gets the top 1 classification results from the
-         *              output tensor (vector of vector).
-         * @param[in]   tensor       Inference output tensor from an NN model.
-         * @param[out]  vecResults   A vector of classification results
-         *                           populated by this function.
-         * @param[in]   labels       Labels vector to match classified classes.
-         * @param[in]   scale        Quantization scale.
-         * @param[in]   zeroPoint    Quantization zero point.
-         * @return      true if successful, false otherwise.
-         **/
-        template<typename T>
-        bool GetTopResults(TfLiteTensor* tensor,
-                           std::vector<ClassificationResult>& vecResults,
-                           const std::vector <std::string>& labels, double scale, double zeroPoint);
-    };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* ASR_CLASSIFIER_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/AsrResult.hpp b/source/use_case/kws_asr/include/AsrResult.hpp
deleted file mode 100644
index 25fa9e8..0000000
--- a/source/use_case/kws_asr/include/AsrResult.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ASR_RESULT_HPP
-#define ASR_RESULT_HPP
-
-#include "ClassificationResult.hpp"
-
-#include <vector>
-
-namespace arm {
-namespace app {
-namespace asr {
-
-    using ResultVec = std::vector<arm::app::ClassificationResult>;
-
-    /* Structure for holding asr result. */
-    class AsrResult {
-
-    public:
-        ResultVec       m_resultVec;        /* Container for "thresholded" classification results. */
-        float           m_timeStamp;        /* Audio timestamp for this result. */
-        uint32_t        m_inferenceNumber;  /* Corresponding inference number. */
-        float           m_threshold;        /* Threshold value for `m_resultVec` */
-
-        AsrResult() = delete;
-        AsrResult(ResultVec&        resultVec,
-                  const float       timestamp,
-                  const uint32_t    inferenceIdx,
-                  const float       scoreThreshold) {
-
-            this->m_threshold = scoreThreshold;
-            this->m_timeStamp = timestamp;
-            this->m_inferenceNumber = inferenceIdx;
-
-            this->m_resultVec = ResultVec();
-            for (auto& i : resultVec) {
-                if (i.m_normalisedVal >= this->m_threshold) {
-                    this->m_resultVec.emplace_back(i);
-                }
-            }
-        }
-        ~AsrResult() = default;
-    };
-
-} /* namespace asr */
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* ASR_RESULT_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/KwsProcessing.hpp b/source/use_case/kws_asr/include/KwsProcessing.hpp
deleted file mode 100644
index d3de3b3..0000000
--- a/source/use_case/kws_asr/include/KwsProcessing.hpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_PROCESSING_HPP
-#define KWS_PROCESSING_HPP
-
-#include <AudioUtils.hpp>
-#include "BaseProcessing.hpp"
-#include "Model.hpp"
-#include "Classifier.hpp"
-#include "MicroNetKwsMfcc.hpp"
-
-#include <functional>
-
-namespace arm {
-namespace app {
-
-    /**
-     * @brief   Pre-processing class for Keyword Spotting use case.
-     *          Implements methods declared by BasePreProcess and anything else needed
-     *          to populate input tensors ready for inference.
-     */
-    class KwsPreProcess : public BasePreProcess {
-
-    public:
-        /**
-         * @brief       Constructor
-         * @param[in]   inputTensor        Pointer to the TFLite Micro input Tensor.
-         * @param[in]   numFeatures        How many MFCC features to use.
-         * @param[in]   numFeatureFrames   Number of MFCC vectors that need to be calculated
-         *                                 for an inference.
-         * @param[in]   mfccFrameLength    Number of audio samples used to calculate one set of MFCC values when
-         *                                 sliding a window through the audio sample.
-         * @param[in]   mfccFrameStride    Number of audio samples between consecutive windows.
-         **/
-        explicit KwsPreProcess(TfLiteTensor* inputTensor, size_t numFeatures, size_t numFeatureFrames,
-                               int mfccFrameLength, int mfccFrameStride);
-
-        /**
-         * @brief       Should perform pre-processing of 'raw' input audio data and load it into
-         *              TFLite Micro input tensors ready for inference.
-         * @param[in]   input      Pointer to the data that pre-processing will work on.
-         * @param[in]   inputSize  Size of the input data.
-         * @return      true if successful, false otherwise.
-         **/
-        bool DoPreProcess(const void* input, size_t inputSize) override;
-
-        size_t m_audioWindowIndex = 0;  /* Index of audio slider, used when caching features in longer clips. */
-        size_t m_audioDataWindowSize;   /* Amount of audio needed for 1 inference. */
-        size_t m_audioDataStride;       /* Amount of audio to stride across if doing >1 inference in longer clips. */
-
-    private:
-        TfLiteTensor* m_inputTensor;    /* Model input tensor. */
-        const int m_mfccFrameLength;
-        const int m_mfccFrameStride;
-        const size_t m_numMfccFrames;   /* How many sets of m_numMfccFeats. */
-
-        audio::MicroNetKwsMFCC m_mfcc;
-        audio::SlidingWindow<const int16_t> m_mfccSlidingWindow;
-        size_t m_numMfccVectorsInAudioStride;
-        size_t m_numReusedMfccVectors;
-        std::function<void (std::vector<int16_t>&, int, bool, size_t)> m_mfccFeatureCalculator;
-
-        /**
-         * @brief Returns a function to perform feature calculation and populates input tensor data with
-         * MFCC data.
-         *
-         * Input tensor data type check is performed to choose correct MFCC feature data type.
-         * If tensor has an integer data type then original features are quantised.
-         *
-         * Warning: MFCC calculator provided as input must have the same life scope as returned function.
-         *
-         * @param[in]       mfcc          MFCC feature calculator.
-         * @param[in,out]   inputTensor   Input tensor pointer to store calculated features.
-         * @param[in]       cacheSize     Size of the feature vectors cache (number of feature vectors).
-         * @return          Function to be called providing audio sample and sliding window index.
-         */
-        std::function<void (std::vector<int16_t>&, int, bool, size_t)>
-        GetFeatureCalculator(audio::MicroNetKwsMFCC&  mfcc,
-                             TfLiteTensor*            inputTensor,
-                             size_t                   cacheSize);
-
-        template<class T>
-        std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
-        FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
-                    std::function<std::vector<T> (std::vector<int16_t>& )> compute);
-    };
-
-    /**
-     * @brief   Post-processing class for Keyword Spotting use case.
-     *          Implements methods declared by BasePostProcess and anything else needed
-     *          to populate result vector.
-     */
-    class KwsPostProcess : public BasePostProcess {
-
-    private:
-        TfLiteTensor* m_outputTensor;                   /* Model output tensor. */
-        Classifier& m_kwsClassifier;                    /* KWS Classifier object. */
-        const std::vector<std::string>& m_labels;       /* KWS Labels. */
-        std::vector<ClassificationResult>& m_results;   /* Results vector for a single inference. */
-
-    public:
-        /**
-         * @brief           Constructor
-         * @param[in]       outputTensor   Pointer to the TFLite Micro output Tensor.
-         * @param[in]       classifier     Classifier object used to get top N results from classification.
-         * @param[in]       labels         Vector of string labels to identify each output of the model.
-         * @param[in/out]   results        Vector of classification results to store decoded outputs.
-         **/
-        KwsPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
-                       const std::vector<std::string>& labels,
-                       std::vector<ClassificationResult>& results);
-
-        /**
-         * @brief    Should perform post-processing of the result of inference then
-         *           populate KWS result data for any later use.
-         * @return   true if successful, false otherwise.
-         **/
-        bool DoPostProcess() override;
-    };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_PROCESSING_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/KwsResult.hpp b/source/use_case/kws_asr/include/KwsResult.hpp
deleted file mode 100644
index 45bb790..0000000
--- a/source/use_case/kws_asr/include/KwsResult.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_RESULT_HPP
-#define KWS_RESULT_HPP
-
-#include "ClassificationResult.hpp"
-
-#include <vector>
-
-namespace arm {
-namespace app {
-namespace kws {
-
-    using ResultVec = std::vector < arm::app::ClassificationResult >;
-
-    /* Structure for holding kws result. */
-    class KwsResult {
-
-    public:
-        ResultVec       m_resultVec;        /* Container for "thresholded" classification results. */
-        float           m_timeStamp;        /* Audio timestamp for this result. */
-        uint32_t        m_inferenceNumber;  /* Corresponding inference number. */
-        float           m_threshold;        /* Threshold value for `m_resultVec.` */
-
-        KwsResult() = delete;
-        KwsResult(ResultVec&        resultVec,
-                  const float       timestamp,
-                  const uint32_t    inferenceIdx,
-                  const float       scoreThreshold) {
-
-            this->m_threshold = scoreThreshold;
-            this->m_timeStamp = timestamp;
-            this->m_inferenceNumber = inferenceIdx;
-
-            this->m_resultVec = ResultVec();
-            for (auto & i : resultVec) {
-                if (i.m_normalisedVal >= this->m_threshold) {
-                    this->m_resultVec.emplace_back(i);
-                }
-            }
-        }
-        ~KwsResult() = default;
-    };
-
-} /* namespace kws */
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_RESULT_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/MicroNetKwsMfcc.hpp b/source/use_case/kws_asr/include/MicroNetKwsMfcc.hpp
deleted file mode 100644
index af6ba5f..0000000
--- a/source/use_case/kws_asr/include/MicroNetKwsMfcc.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_ASR_MICRONET_MFCC_HPP
-#define KWS_ASR_MICRONET_MFCC_HPP
-
-#include "Mfcc.hpp"
-
-namespace arm {
-namespace app {
-namespace audio {
-
-    /* Class to provide MicroNet specific MFCC calculation requirements. */
-    class MicroNetKwsMFCC : public MFCC {
-
-    public:
-        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
-        static constexpr uint32_t  ms_defaultNumFbankBins =    40;
-        static constexpr uint32_t  ms_defaultMelLoFreq    =    20;
-        static constexpr uint32_t  ms_defaultMelHiFreq    =  4000;
-        static constexpr bool      ms_defaultUseHtkMethod =  true;
-
-
-        explicit MicroNetKwsMFCC(const size_t numFeats, const size_t frameLen)
-            :  MFCC(MfccParams(
-                        ms_defaultSamplingFreq, ms_defaultNumFbankBins,
-                        ms_defaultMelLoFreq, ms_defaultMelHiFreq,
-                        numFeats, frameLen, ms_defaultUseHtkMethod))
-        {}
-        MicroNetKwsMFCC()  = delete;
-        ~MicroNetKwsMFCC() = default;
-    };
-
-} /* namespace audio */
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_ASR_MICRONET_MFCC_HPP */
diff --git a/source/use_case/kws_asr/include/OutputDecode.hpp b/source/use_case/kws_asr/include/OutputDecode.hpp
deleted file mode 100644
index cea2c33..0000000
--- a/source/use_case/kws_asr/include/OutputDecode.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_ASR_OUTPUT_DECODE_HPP
-#define KWS_ASR_OUTPUT_DECODE_HPP
-
-#include "AsrClassifier.hpp"
-
-namespace arm {
-namespace app {
-namespace audio {
-namespace asr {
-
-    /**
-     * @brief       Gets the top N classification results from the
-     *              output vector.
-     * @param[in]   vecResults   Label output from classifier.
-     * @return      true if successful, false otherwise.
-    **/
-    std::string DecodeOutput(const std::vector<ClassificationResult>& vecResults);
-
-} /* namespace asr */
-} /* namespace audio */
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_ASR_OUTPUT_DECODE_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/Wav2LetterMfcc.hpp b/source/use_case/kws_asr/include/Wav2LetterMfcc.hpp
deleted file mode 100644
index 75d75da..0000000
--- a/source/use_case/kws_asr/include/Wav2LetterMfcc.hpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_ASR_WAV2LET_MFCC_HPP
-#define KWS_ASR_WAV2LET_MFCC_HPP
-
-#include "Mfcc.hpp"
-
-namespace arm {
-namespace app {
-namespace audio {
-
-    /* Class to provide Wav2Letter specific MFCC calculation requirements. */
-    class Wav2LetterMFCC : public MFCC {
-
-    public:
-        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
-        static constexpr uint32_t  ms_defaultNumFbankBins =   128;
-        static constexpr uint32_t  ms_defaultMelLoFreq    =     0;
-        static constexpr uint32_t  ms_defaultMelHiFreq    =  8000;
-        static constexpr bool      ms_defaultUseHtkMethod = false;
-
-        explicit Wav2LetterMFCC(const size_t numFeats, const size_t frameLen)
-            :  MFCC(MfccParams(
-                        ms_defaultSamplingFreq, ms_defaultNumFbankBins,
-                        ms_defaultMelLoFreq, ms_defaultMelHiFreq,
-                        numFeats, frameLen, ms_defaultUseHtkMethod))
-        {}
-
-        Wav2LetterMFCC()  = delete;
-        ~Wav2LetterMFCC() = default;
-
-    protected:
-
-        /**
-         * @brief       Overrides base class implementation of this function.
-         * @param[in]   fftVec                  Vector populated with FFT magnitudes.
-         * @param[in]   melFilterBank           2D Vector with filter bank weights.
-         * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
-         *                                      to be used for each bin.
-         * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
-         *                                      to be used for each bin.
-         * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
-         *                                      populated.
-         * @return      true if successful, false otherwise.
-         */
-        bool ApplyMelFilterBank(
-                std::vector<float>&                 fftVec,
-                std::vector<std::vector<float>>&    melFilterBank,
-                std::vector<uint32_t>&              filterBankFilterFirst,
-                std::vector<uint32_t>&              filterBankFilterLast,
-                std::vector<float>&                 melEnergies) override;
-
-        /**
-         * @brief           Override for the base class implementation convert mel
-         *                  energies to logarithmic scale. The difference from
-         *                  default behaviour is that the power is converted to dB
-         *                  and subsequently clamped.
-         * @param[in,out]   melEnergies   1D vector of Mel energies.
-         **/
-        void ConvertToLogarithmicScale(
-                std::vector<float>& melEnergies) override;
-
-        /**
-         * @brief       Create a matrix used to calculate Discrete Cosine
-         *              Transform. Override for the base class' default
-         *              implementation as the first and last elements
-         *              use a different normaliser.
-         * @param[in]   inputLength        Input length of the buffer on which
-         *                                 DCT will be performed.
-         * @param[in]   coefficientCount   Total coefficients per input length.
-         * @return      1D vector with inputLength x coefficientCount elements
-         *              populated with DCT coefficients.
-         */
-        std::vector<float> CreateDCTMatrix(
-                int32_t inputLength,
-                int32_t coefficientCount) override;
-
-        /**
-         * @brief       Given the low and high Mel values, get the normaliser
-         *              for weights to be applied when populating the filter
-         *              bank. Override for the base class implementation.
-         * @param[in]   leftMel        Low Mel frequency value.
-         * @param[in]   rightMel       High Mel frequency value.
-         * @param[in]   useHTKMethod   Bool to signal if HTK method is to be
-         *                             used for calculation.
-         * @return      Value to use for normalising.
-         */
-        float GetMelFilterBankNormaliser(
-                const float&   leftMel,
-                const float&   rightMel,
-                bool     useHTKMethod) override;
-
-    };
-
-} /* namespace audio */
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_ASR_WAV2LET_MFCC_HPP */
diff --git a/source/use_case/kws_asr/include/Wav2LetterPostprocess.hpp b/source/use_case/kws_asr/include/Wav2LetterPostprocess.hpp
deleted file mode 100644
index d1bc9a2..0000000
--- a/source/use_case/kws_asr/include/Wav2LetterPostprocess.hpp
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_ASR_WAV2LETTER_POSTPROCESS_HPP
-#define KWS_ASR_WAV2LETTER_POSTPROCESS_HPP
-
-#include "TensorFlowLiteMicro.hpp"   /* TensorFlow headers. */
-#include "BaseProcessing.hpp"
-#include "AsrClassifier.hpp"
-#include "AsrResult.hpp"
-#include "log_macros.h"
-
-namespace arm {
-namespace app {
-
-    /**
-     * @brief   Helper class to manage tensor post-processing for "wav2letter"
-     *          output.
-     */
-    class AsrPostProcess : public BasePostProcess {
-    public:
-        bool m_lastIteration = false;   /* Flag to set if processing the last set of data for a clip. */
-
-        /**
-         * @brief           Constructor
-         * @param[in]       outputTensor       Pointer to the TFLite Micro output Tensor.
-         * @param[in]       classifier         Object used to get top N results from classification.
-         * @param[in]       labels             Vector of string labels to identify each output of the model.
-         * @param[in/out]   result             Vector of classification results to store decoded outputs.
-         * @param[in]       outputContextLen   Left/right context length for output tensor.
-         * @param[in]       blankTokenIdx      Index in the labels that the "Blank token" takes.
-         * @param[in]       reductionAxis      The axis that the logits of each time step is on.
-         **/
-        AsrPostProcess(TfLiteTensor* outputTensor, AsrClassifier& classifier,
-                       const std::vector<std::string>& labels, asr::ResultVec& result,
-                       uint32_t outputContextLen,
-                       uint32_t blankTokenIdx, uint32_t reductionAxis);
-
-        /**
-         * @brief    Should perform post-processing of the result of inference then
-         *           populate ASR result data for any later use.
-         * @return   true if successful, false otherwise.
-         **/
-        bool DoPostProcess() override;
-
-        /** @brief   Gets the output inner length for post-processing. */
-        static uint32_t GetOutputInnerLen(const TfLiteTensor*, uint32_t outputCtxLen);
-
-        /** @brief   Gets the output context length (left/right) for post-processing. */
-        static uint32_t GetOutputContextLen(const Model& model, uint32_t inputCtxLen);
-
-        /** @brief   Gets the number of feature vectors to be computed. */
-        static uint32_t GetNumFeatureVectors(const Model& model);
-
-    private:
-        AsrClassifier& m_classifier;                /* ASR Classifier object. */
-        TfLiteTensor* m_outputTensor;               /* Model output tensor. */
-        const std::vector<std::string>& m_labels;   /* ASR Labels. */
-        asr::ResultVec & m_results;                 /* Results vector for a single inference. */
-        uint32_t m_outputContextLen;                /* lengths of left/right contexts for output. */
-        uint32_t m_outputInnerLen;                  /* Length of output inner context. */
-        uint32_t m_totalLen;                        /* Total length of the required axis. */
-        uint32_t m_countIterations;                 /* Current number of iterations. */
-        uint32_t m_blankTokenIdx;                   /* Index of the labels blank token. */
-        uint32_t m_reductionAxisIdx;                /* Axis containing output logits for a single step. */
-
-        /**
-         * @brief    Checks if the tensor and axis index are valid
-         *           inputs to the object - based on how it has been initialised.
-         * @return   true if valid, false otherwise.
-         */
-        bool IsInputValid(TfLiteTensor*  tensor,
-                          uint32_t axisIdx) const;
-
-        /**
-         * @brief    Gets the tensor data element size in bytes based
-         *           on the tensor type.
-         * @return   Size in bytes, 0 if not supported.
-         */
-        static uint32_t GetTensorElementSize(TfLiteTensor* tensor);
-
-        /**
-         * @brief    Erases sections from the data assuming row-wise
-         *           arrangement along the context axis.
-         * @return   true if successful, false otherwise.
-         */
-        bool EraseSectionsRowWise(uint8_t* ptrData,
-                                  uint32_t strideSzBytes,
-                                  bool lastIteration);
-    };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_ASR_WAV2LETTER_POSTPROCESS_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/Wav2LetterPreprocess.hpp b/source/use_case/kws_asr/include/Wav2LetterPreprocess.hpp
deleted file mode 100644
index 1224c23..0000000
--- a/source/use_case/kws_asr/include/Wav2LetterPreprocess.hpp
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef KWS_ASR_WAV2LETTER_PREPROCESS_HPP
-#define KWS_ASR_WAV2LETTER_PREPROCESS_HPP
-
-#include "Wav2LetterModel.hpp"
-#include "Wav2LetterMfcc.hpp"
-#include "AudioUtils.hpp"
-#include "DataStructures.hpp"
-#include "BaseProcessing.hpp"
-#include "log_macros.h"
-
-namespace arm {
-namespace app {
-
-    /* Class to facilitate pre-processing calculation for Wav2Letter model
-     * for ASR. */
-    using AudioWindow = audio::SlidingWindow<const int16_t>;
-
-    class AsrPreProcess : public BasePreProcess {
-    public:
-        /**
-         * @brief       Constructor.
-         * @param[in]   inputTensor        Pointer to the TFLite Micro input Tensor.
-         * @param[in]   numMfccFeatures    Number of MFCC features per window.
-         * @param[in]   numFeatureFrames   Number of MFCC vectors that need to be calculated
-         *                                 for an inference.
-         * @param[in]   mfccWindowLen      Number of audio elements to calculate MFCC features per window.
-         * @param[in]   mfccWindowStride   Stride (in number of elements) for moving the MFCC window.
-         */
-        AsrPreProcess(TfLiteTensor* inputTensor,
-                      uint32_t  numMfccFeatures,
-                      uint32_t  numFeatureFrames,
-                      uint32_t  mfccWindowLen,
-                      uint32_t  mfccWindowStride);
-
-        /**
-         * @brief       Calculates the features required from audio data. This
-         *              includes MFCC, first and second order deltas,
-         *              normalisation and finally, quantisation. The tensor is
-         *              populated with features from a given window placed along
-         *              in a single row.
-         * @param[in]   audioData      Pointer to the first element of audio data.
-         * @param[in]   audioDataLen   Number of elements in the audio data.
-         * @return      true if successful, false in case of error.
-         */
-        bool DoPreProcess(const void* audioData, size_t audioDataLen) override;
-
-    protected:
-         /**
-          * @brief Computes the first and second order deltas for the
-          *        MFCC buffers - they are assumed to be populated.
-          *
-          * @param[in]  mfcc     MFCC buffers.
-          * @param[out] delta1   Result of the first diff computation.
-          * @param[out] delta2   Result of the second diff computation.
-          * @return     true if successful, false otherwise.
-          */
-         static bool ComputeDeltas(Array2d<float>& mfcc,
-                                   Array2d<float>& delta1,
-                                   Array2d<float>& delta2);
-
-        /**
-         * @brief           Given a 2D vector of floats, rescale it to have mean of 0 and
-        *                   standard deviation of 1.
-         * @param[in,out]   vec   Vector of vector of floats.
-         */
-        static void StandardizeVecF32(Array2d<float>& vec);
-
-        /**
-         * @brief   Standardizes all the MFCC and delta buffers to have mean 0 and std. dev 1.
-         */
-        void Standarize();
-
-        /**
-         * @brief       Given the quantisation and data type limits, computes
-         *              the quantised values of a floating point input data.
-         * @param[in]   elem          Element to be quantised.
-         * @param[in]   quantScale    Scale.
-         * @param[in]   quantOffset   Offset.
-         * @param[in]   minVal        Numerical limit - minimum.
-         * @param[in]   maxVal        Numerical limit - maximum.
-         * @return      Floating point quantised value.
-         */
-        static float GetQuantElem(
-                float     elem,
-                float     quantScale,
-                int       quantOffset,
-                float     minVal,
-                float     maxVal);
-
-        /**
-         * @brief       Quantises the MFCC and delta buffers, and places them
-         *              in the output buffer. While doing so, it transposes
-         *              the data. Reason: Buffers in this class are arranged
-         *              for "time" axis to be row major. Primary reason for
-         *              this being the convolution speed up (as we can use
-         *              contiguous memory). The output, however, requires the
-         *              time axis to be in column major arrangement.
-         * @param[in]   outputBuf     Pointer to the output buffer.
-         * @param[in]   outputBufSz   Output buffer's size.
-         * @param[in]   quantScale    Quantisation scale.
-         * @param[in]   quantOffset   Quantisation offset.
-         */
-        template <typename T>
-        bool Quantise(
-                T*              outputBuf,
-                const uint32_t  outputBufSz,
-                const float     quantScale,
-                const int       quantOffset)
-        {
-            /* Check the output size will fit everything. */
-            if (outputBufSz < (this->m_mfccBuf.size(0) * 3 * sizeof(T))) {
-                printf_err("Tensor size too small for features\n");
-                return false;
-            }
-
-            /* Populate. */
-            T* outputBufMfcc = outputBuf;
-            T* outputBufD1 = outputBuf + this->m_numMfccFeats;
-            T* outputBufD2 = outputBufD1 + this->m_numMfccFeats;
-            const uint32_t ptrIncr = this->m_numMfccFeats * 2;  /* (3 vectors - 1 vector) */
-
-            const float minVal = std::numeric_limits<T>::min();
-            const float maxVal = std::numeric_limits<T>::max();
-
-            /* Need to transpose while copying and concatenating the tensor. */
-            for (uint32_t j = 0; j < this->m_numFeatureFrames; ++j) {
-                for (uint32_t i = 0; i < this->m_numMfccFeats; ++i) {
-                    *outputBufMfcc++ = static_cast<T>(AsrPreProcess::GetQuantElem(
-                            this->m_mfccBuf(i, j), quantScale,
-                            quantOffset, minVal, maxVal));
-                    *outputBufD1++ = static_cast<T>(AsrPreProcess::GetQuantElem(
-                            this->m_delta1Buf(i, j), quantScale,
-                            quantOffset, minVal, maxVal));
-                    *outputBufD2++ = static_cast<T>(AsrPreProcess::GetQuantElem(
-                            this->m_delta2Buf(i, j), quantScale,
-                            quantOffset, minVal, maxVal));
-                }
-                outputBufMfcc += ptrIncr;
-                outputBufD1 += ptrIncr;
-                outputBufD2 += ptrIncr;
-            }
-
-            return true;
-        }
-
-    private:
-        audio::Wav2LetterMFCC   m_mfcc;          /* MFCC instance. */
-        TfLiteTensor*           m_inputTensor;   /* Model input tensor. */
-
-        /* Actual buffers to be populated. */
-        Array2d<float>   m_mfccBuf;              /* Contiguous buffer 1D: MFCC */
-        Array2d<float>   m_delta1Buf;            /* Contiguous buffer 1D: Delta 1 */
-        Array2d<float>   m_delta2Buf;            /* Contiguous buffer 1D: Delta 2 */
-
-        uint32_t         m_mfccWindowLen;        /* Window length for MFCC. */
-        uint32_t         m_mfccWindowStride;     /* Window stride len for MFCC. */
-        uint32_t         m_numMfccFeats;         /* Number of MFCC features per window. */
-        uint32_t         m_numFeatureFrames;     /* How many sets of m_numMfccFeats. */
-        AudioWindow      m_mfccSlidingWindow;    /* Sliding window to calculate MFCCs. */
-
-    };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* KWS_ASR_WAV2LETTER_PREPROCESS_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/AsrClassifier.cc b/source/use_case/kws_asr/src/AsrClassifier.cc
deleted file mode 100644
index 9c18b14..0000000
--- a/source/use_case/kws_asr/src/AsrClassifier.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "AsrClassifier.hpp"
-
-#include "log_macros.h"
-#include "TensorFlowLiteMicro.hpp"
-#include "Wav2LetterModel.hpp"
-
-template<typename T>
-bool arm::app::AsrClassifier::GetTopResults(TfLiteTensor* tensor,
-                                            std::vector<ClassificationResult>& vecResults,
-                                            const std::vector <std::string>& labels, double scale, double zeroPoint)
-{
-    const uint32_t nElems = tensor->dims->data[arm::app::Wav2LetterModel::ms_outputRowsIdx];
-    const uint32_t nLetters = tensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx];
-
-    if (nLetters != labels.size()) {
-        printf("Output size doesn't match the labels' size\n");
-        return false;
-    }
-
-    /* NOTE: tensor's size verification against labels should be
-     *       checked by the calling/public function. */
-    if (nLetters < 1) {
-        return false;
-    }
-
-    /* Final results' container. */
-    vecResults = std::vector<ClassificationResult>(nElems);
-
-    T* tensorData = tflite::GetTensorData<T>(tensor);
-
-    /* Get the top 1 results. */
-    for (uint32_t i = 0, row = 0; i < nElems; ++i, row+=nLetters) {
-        std::pair<T, uint32_t> top_1 = std::make_pair(tensorData[row], 0);
-
-        for (uint32_t j = 1; j < nLetters; ++j) {
-            if (top_1.first < tensorData[row + j]) {
-                top_1.first = tensorData[row + j];
-                top_1.second = j;
-            }
-        }
-
-        double score = static_cast<int> (top_1.first);
-        vecResults[i].m_normalisedVal = scale * (score - zeroPoint);
-        vecResults[i].m_label = labels[top_1.second];
-        vecResults[i].m_labelIdx = top_1.second;
-    }
-
-    return true;
-}
-template bool arm::app::AsrClassifier::GetTopResults<uint8_t>(TfLiteTensor* tensor,
-                                                              std::vector<ClassificationResult>& vecResults,
-                                                              const std::vector <std::string>& labels, double scale, double zeroPoint);
-template bool arm::app::AsrClassifier::GetTopResults<int8_t>(TfLiteTensor* tensor,
-                                                             std::vector<ClassificationResult>& vecResults,
-                                                             const std::vector <std::string>& labels, double scale, double zeroPoint);
-
-bool arm::app::AsrClassifier::GetClassificationResults(
-            TfLiteTensor* outputTensor,
-            std::vector<ClassificationResult>& vecResults,
-            const std::vector <std::string>& labels, uint32_t topNCount, bool use_softmax)
-{
-        UNUSED(use_softmax);
-        vecResults.clear();
-
-        constexpr int minTensorDims = static_cast<int>(
-            (arm::app::Wav2LetterModel::ms_outputRowsIdx > arm::app::Wav2LetterModel::ms_outputColsIdx)?
-             arm::app::Wav2LetterModel::ms_outputRowsIdx : arm::app::Wav2LetterModel::ms_outputColsIdx);
-
-        constexpr uint32_t outColsIdx = arm::app::Wav2LetterModel::ms_outputColsIdx;
-
-        /* Sanity checks. */
-        if (outputTensor == nullptr) {
-            printf_err("Output vector is null pointer.\n");
-            return false;
-        } else if (outputTensor->dims->size < minTensorDims) {
-            printf_err("Output tensor expected to be 3D (1, m, n)\n");
-            return false;
-        } else if (static_cast<uint32_t>(outputTensor->dims->data[outColsIdx]) < topNCount) {
-            printf_err("Output vectors are smaller than %" PRIu32 "\n", topNCount);
-            return false;
-        } else if (static_cast<uint32_t>(outputTensor->dims->data[outColsIdx]) != labels.size()) {
-            printf("Output size doesn't match the labels' size\n");
-            return false;
-        }
-
-        if (topNCount != 1) {
-            warn("TopNCount value ignored in this implementation\n");
-        }
-
-        /* To return the floating point values, we need quantization parameters. */
-        QuantParams quantParams = GetTensorQuantParams(outputTensor);
-
-        bool resultState;
-
-        switch (outputTensor->type) {
-            case kTfLiteUInt8:
-                resultState = this->GetTopResults<uint8_t>(
-                        outputTensor, vecResults,
-                        labels, quantParams.scale,
-                        quantParams.offset);
-                break;
-            case kTfLiteInt8:
-                resultState = this->GetTopResults<int8_t>(
-                        outputTensor, vecResults,
-                        labels, quantParams.scale,
-                        quantParams.offset);
-                break;
-            default:
-                printf_err("Tensor type %s not supported by classifier\n",
-                    TfLiteTypeGetName(outputTensor->type));
-                return false;
-        }
-
-        if (!resultState) {
-            printf_err("Failed to get sorted set\n");
-            return false;
-        }
-
-        return true;
-}
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/KwsProcessing.cc b/source/use_case/kws_asr/src/KwsProcessing.cc
deleted file mode 100644
index 328709d..0000000
--- a/source/use_case/kws_asr/src/KwsProcessing.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "KwsProcessing.hpp"
-#include "ImageUtils.hpp"
-#include "log_macros.h"
-#include "MicroNetKwsModel.hpp"
-
-namespace arm {
-namespace app {
-
-    KwsPreProcess::KwsPreProcess(TfLiteTensor* inputTensor, size_t numFeatures, size_t numMfccFrames,
-            int mfccFrameLength, int mfccFrameStride
-        ):
-        m_inputTensor{inputTensor},
-        m_mfccFrameLength{mfccFrameLength},
-        m_mfccFrameStride{mfccFrameStride},
-        m_numMfccFrames{numMfccFrames},
-        m_mfcc{audio::MicroNetKwsMFCC(numFeatures, mfccFrameLength)}
-    {
-        this->m_mfcc.Init();
-
-        /* Deduce the data length required for 1 inference from the network parameters. */
-        this->m_audioDataWindowSize = this->m_numMfccFrames * this->m_mfccFrameStride +
-                (this->m_mfccFrameLength - this->m_mfccFrameStride);
-
-        /* Creating an MFCC feature sliding window for the data required for 1 inference. */
-        this->m_mfccSlidingWindow = audio::SlidingWindow<const int16_t>(nullptr, this->m_audioDataWindowSize,
-                this->m_mfccFrameLength, this->m_mfccFrameStride);
-
-        /* For longer audio clips we choose to move by half the audio window size
-         * => for a 1 second window size there is an overlap of 0.5 seconds. */
-        this->m_audioDataStride = this->m_audioDataWindowSize / 2;
-
-        /* To have the previously calculated features re-usable, stride must be multiple
-         * of MFCC features window stride. Reduce stride through audio if needed. */
-        if (0 != this->m_audioDataStride % this->m_mfccFrameStride) {
-            this->m_audioDataStride -= this->m_audioDataStride % this->m_mfccFrameStride;
-        }
-
-        this->m_numMfccVectorsInAudioStride = this->m_audioDataStride / this->m_mfccFrameStride;
-
-        /* Calculate number of the feature vectors in the window overlap region.
-         * These feature vectors will be reused.*/
-        this->m_numReusedMfccVectors = this->m_mfccSlidingWindow.TotalStrides() + 1
-                - this->m_numMfccVectorsInAudioStride;
-
-        /* Construct feature calculation function. */
-        this->m_mfccFeatureCalculator = GetFeatureCalculator(this->m_mfcc, this->m_inputTensor,
-                                                             this->m_numReusedMfccVectors);
-
-        if (!this->m_mfccFeatureCalculator) {
-            printf_err("Feature calculator not initialized.");
-        }
-    }
-
-    bool KwsPreProcess::DoPreProcess(const void* data, size_t inputSize)
-    {
-        UNUSED(inputSize);
-        if (data == nullptr) {
-            printf_err("Data pointer is null");
-        }
-
-        /* Set the features sliding window to the new address. */
-        auto input = static_cast<const int16_t*>(data);
-        this->m_mfccSlidingWindow.Reset(input);
-
-        /* Cache is only usable if we have more than 1 inference in an audio clip. */
-        bool useCache = this->m_audioWindowIndex > 0 && this->m_numReusedMfccVectors > 0;
-
-        /* Use a sliding window to calculate MFCC features frame by frame. */
-        while (this->m_mfccSlidingWindow.HasNext()) {
-            const int16_t* mfccWindow = this->m_mfccSlidingWindow.Next();
-
-            std::vector<int16_t> mfccFrameAudioData = std::vector<int16_t>(mfccWindow,
-                    mfccWindow + this->m_mfccFrameLength);
-
-            /* Compute features for this window and write them to input tensor. */
-            this->m_mfccFeatureCalculator(mfccFrameAudioData, this->m_mfccSlidingWindow.Index(),
-                                          useCache, this->m_numMfccVectorsInAudioStride);
-        }
-
-        debug("Input tensor populated \n");
-
-        return true;
-    }
-
-    /**
-     * @brief Generic feature calculator factory.
-     *
-     * Returns lambda function to compute features using features cache.
-     * Real features math is done by a lambda function provided as a parameter.
-     * Features are written to input tensor memory.
-     *
-     * @tparam T                Feature vector type.
-     * @param[in] inputTensor   Model input tensor pointer.
-     * @param[in] cacheSize     Number of feature vectors to cache. Defined by the sliding window overlap.
-     * @param[in] compute       Features calculator function.
-     * @return                  Lambda function to compute features.
-     */
-    template<class T>
-    std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
-    KwsPreProcess::FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
-                               std::function<std::vector<T> (std::vector<int16_t>& )> compute)
-    {
-        /* Feature cache to be captured by lambda function. */
-        static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);
-
-        return [=](std::vector<int16_t>& audioDataWindow,
-                   size_t index,
-                   bool useCache,
-                   size_t featuresOverlapIndex)
-        {
-            T* tensorData = tflite::GetTensorData<T>(inputTensor);
-            std::vector<T> features;
-
-            /* Reuse features from cache if cache is ready and sliding windows overlap.
-             * Overlap is in the beginning of sliding window with a size of a feature cache. */
-            if (useCache && index < featureCache.size()) {
-                features = std::move(featureCache[index]);
-            } else {
-                features = std::move(compute(audioDataWindow));
-            }
-            auto size = features.size();
-            auto sizeBytes = sizeof(T) * size;
-            std::memcpy(tensorData + (index * size), features.data(), sizeBytes);
-
-            /* Start renewing cache as soon iteration goes out of the windows overlap. */
-            if (index >= featuresOverlapIndex) {
-                featureCache[index - featuresOverlapIndex] = std::move(features);
-            }
-        };
-    }
-
-    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
-    KwsPreProcess::FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
-                                       size_t cacheSize,
-                                       std::function<std::vector<int8_t> (std::vector<int16_t>&)> compute);
-
-    template std::function<void(std::vector<int16_t>&, size_t, bool, size_t)>
-    KwsPreProcess::FeatureCalc<float>(TfLiteTensor* inputTensor,
-                                      size_t cacheSize,
-                                      std::function<std::vector<float>(std::vector<int16_t>&)> compute);
-
-
-    std::function<void (std::vector<int16_t>&, int, bool, size_t)>
-    KwsPreProcess::GetFeatureCalculator(audio::MicroNetKwsMFCC& mfcc, TfLiteTensor* inputTensor, size_t cacheSize)
-    {
-        std::function<void (std::vector<int16_t>&, size_t, bool, size_t)> mfccFeatureCalc;
-
-        TfLiteQuantization quant = inputTensor->quantization;
-
-        if (kTfLiteAffineQuantization == quant.type) {
-            auto *quantParams = (TfLiteAffineQuantization *) quant.params;
-            const float quantScale = quantParams->scale->data[0];
-            const int quantOffset = quantParams->zero_point->data[0];
-
-            switch (inputTensor->type) {
-                case kTfLiteInt8: {
-                    mfccFeatureCalc = this->FeatureCalc<int8_t>(inputTensor,
-                                                          cacheSize,
-                                                          [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
-                                                              return mfcc.MfccComputeQuant<int8_t>(audioDataWindow,
-                                                                                                   quantScale,
-                                                                                                   quantOffset);
-                                                          }
-                    );
-                    break;
-                }
-                default:
-                printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
-            }
-        } else {
-            mfccFeatureCalc = this->FeatureCalc<float>(inputTensor, cacheSize,
-                    [&mfcc](std::vector<int16_t>& audioDataWindow) {
-                return mfcc.MfccCompute(audioDataWindow); }
-                );
-        }
-        return mfccFeatureCalc;
-    }
-
-    KwsPostProcess::KwsPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
-                                   const std::vector<std::string>& labels,
-                                   std::vector<ClassificationResult>& results)
-            :m_outputTensor{outputTensor},
-             m_kwsClassifier{classifier},
-             m_labels{labels},
-             m_results{results}
-    {}
-
-    bool KwsPostProcess::DoPostProcess()
-    {
-        return this->m_kwsClassifier.GetClassificationResults(
-                this->m_outputTensor, this->m_results,
-                this->m_labels, 1, true);
-    }
-
-} /* namespace app */
-} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/MainLoop.cc b/source/use_case/kws_asr/src/MainLoop.cc
index f1d97a0..2365264 100644
--- a/source/use_case/kws_asr/src/MainLoop.cc
+++ b/source/use_case/kws_asr/src/MainLoop.cc
@@ -23,7 +23,24 @@
 #include "Wav2LetterModel.hpp"      /* ASR model class for running inference. */
 #include "UseCaseCommonUtils.hpp"   /* Utils functions. */
 #include "UseCaseHandler.hpp"       /* Handlers for different user options. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+namespace app {
+    static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+
+    namespace asr {
+        extern uint8_t* GetModelPointer();
+        extern size_t GetModelLen();
+    }
+
+    namespace kws {
+        extern uint8_t* GetModelPointer();
+        extern size_t GetModelLen();
+    }
+} /* namespace app */
+} /* namespace arm */
 
 using KwsClassifier = arm::app::Classifier;
 
@@ -60,14 +77,29 @@
     arm::app::Wav2LetterModel asrModel;
 
     /* Load the models. */
-    if (!kwsModel.Init()) {
+    if (!kwsModel.Init(arm::app::tensorArena,
+                       sizeof(arm::app::tensorArena),
+                       arm::app::kws::GetModelPointer(),
+                       arm::app::kws::GetModelLen())) {
         printf_err("Failed to initialise KWS model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If this is not an NPU build, check if the model contains an NPU operator */
+    if (kwsModel.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the KWS model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Initialise the asr model using the same allocator from KWS
      * to re-use the tensor arena. */
-    if (!asrModel.Init(kwsModel.GetAllocator())) {
+    if (!asrModel.Init(arm::app::tensorArena,
+                       sizeof(arm::app::tensorArena),
+                       arm::app::asr::GetModelPointer(),
+                       arm::app::asr::GetModelLen(),
+                       kwsModel.GetAllocator())) {
         printf_err("Failed to initialise ASR model\n");
         return;
     } else if (!VerifyTensorDimensions(asrModel)) {
@@ -75,6 +107,14 @@
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If this is not an NPU build, check if the model contains an NPU operator */
+    if (asrModel.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the ASR model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
diff --git a/source/use_case/kws_asr/src/MicroNetKwsModel.cc b/source/use_case/kws_asr/src/MicroNetKwsModel.cc
deleted file mode 100644
index 663faa0..0000000
--- a/source/use_case/kws_asr/src/MicroNetKwsModel.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "MicroNetKwsModel.hpp"
-#include "log_macros.h"
-
-namespace arm {
-namespace app {
-namespace kws {
-    extern uint8_t* GetModelPointer();
-    extern size_t GetModelLen();
-} /* namespace kws */
-} /* namespace app */
-} /* namespace arm */
-
-const tflite::MicroOpResolver& arm::app::MicroNetKwsModel::GetOpResolver()
-{
-    return this->m_opResolver;
-}
-
-bool arm::app::MicroNetKwsModel::EnlistOperations()
-{
-    this->m_opResolver.AddAveragePool2D();
-    this->m_opResolver.AddConv2D();
-    this->m_opResolver.AddDepthwiseConv2D();
-    this->m_opResolver.AddFullyConnected();
-    this->m_opResolver.AddRelu();
-    this->m_opResolver.AddReshape();
-
-#if defined(ARM_NPU)
-    if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
-        info("Added %s support to op resolver\n",
-            tflite::GetString_ETHOSU());
-    } else {
-        printf_err("Failed to add Arm NPU support to op resolver.");
-        return false;
-    }
-#endif /* ARM_NPU */
-    return true;
-}
-
-const uint8_t* arm::app::MicroNetKwsModel::ModelPointer()
-{
-    return arm::app::kws::GetModelPointer();
-}
-
-size_t arm::app::MicroNetKwsModel::ModelSize()
-{
-    return arm::app::kws::GetModelLen();
-}
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/OutputDecode.cc b/source/use_case/kws_asr/src/OutputDecode.cc
deleted file mode 100644
index 41fbe07..0000000
--- a/source/use_case/kws_asr/src/OutputDecode.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "OutputDecode.hpp"
-
-namespace arm {
-namespace app {
-namespace audio {
-namespace asr {
-
-    std::string DecodeOutput(const std::vector<ClassificationResult>& vecResults)
-    {
-        std::string CleanOutputBuffer;
-
-        for (size_t i = 0; i < vecResults.size(); ++i)  /* For all elements in vector. */
-        {
-            while (i+1 < vecResults.size() &&
-                   vecResults[i].m_label == vecResults[i+1].m_label)  /* While the current element is equal to the next, ignore it and move on. */
-            {
-                ++i;
-            }
-            if (vecResults[i].m_label != "$")  /* $ is a character used to represent unknown and double characters so should not be in output. */
-            {
-                CleanOutputBuffer += vecResults[i].m_label;  /* If the element is different to the next, it will be appended to CleanOutputBuffer. */
-            }
-        }
-
-        return CleanOutputBuffer;  /* Return string type containing clean output. */
-    }
-
-} /* namespace asr */
-} /* namespace audio */
-} /* namespace app */
-} /* namespace arm */
diff --git a/source/use_case/kws_asr/src/UseCaseHandler.cc b/source/use_case/kws_asr/src/UseCaseHandler.cc
index 01aefae..9427ae0 100644
--- a/source/use_case/kws_asr/src/UseCaseHandler.cc
+++ b/source/use_case/kws_asr/src/UseCaseHandler.cc
@@ -25,6 +25,7 @@
 #include "MicroNetKwsMfcc.hpp"
 #include "Classifier.hpp"
 #include "KwsResult.hpp"
+#include "Wav2LetterModel.hpp"
 #include "Wav2LetterMfcc.hpp"
 #include "Wav2LetterPreprocess.hpp"
 #include "Wav2LetterPostprocess.hpp"
@@ -470,4 +471,4 @@
     }
 
 } /* namespace app */
-} /* namespace arm */
\ No newline at end of file
+} /* namespace arm */
diff --git a/source/use_case/kws_asr/src/Wav2LetterMfcc.cc b/source/use_case/kws_asr/src/Wav2LetterMfcc.cc
deleted file mode 100644
index f2c50f3..0000000
--- a/source/use_case/kws_asr/src/Wav2LetterMfcc.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "Wav2LetterMfcc.hpp"
-
-#include "PlatformMath.hpp"
-#include "log_macros.h"
-
-#include <cfloat>
-
-namespace arm {
-namespace app {
-namespace audio {
-
-    bool Wav2LetterMFCC::ApplyMelFilterBank(
-            std::vector<float>&                 fftVec,
-            std::vector<std::vector<float>>&    melFilterBank,
-            std::vector<uint32_t>&              filterBankFilterFirst,
-            std::vector<uint32_t>&              filterBankFilterLast,
-            std::vector<float>&                 melEnergies)
-    {
-        const size_t numBanks = melEnergies.size();
-
-        if (numBanks != filterBankFilterFirst.size() ||
-                numBanks != filterBankFilterLast.size()) {
-            printf_err("unexpected filter bank lengths\n");
-            return false;
-        }
-
-        for (size_t bin = 0; bin < numBanks; ++bin) {
-            auto filterBankIter = melFilterBank[bin].begin();
-            auto end = melFilterBank[bin].end();
-            /* Avoid log of zero at later stages, same value used in librosa.
-             * The number was used during our default wav2letter model training. */
-            float melEnergy = 1e-10;
-            const uint32_t firstIndex = filterBankFilterFirst[bin];
-            const uint32_t lastIndex = std::min<uint32_t>(filterBankFilterLast[bin], fftVec.size() - 1);
-
-            for (uint32_t i = firstIndex; i <= lastIndex && filterBankIter != end; ++i) {
-                melEnergy += (*filterBankIter++ * fftVec[i]);
-            }
-
-            melEnergies[bin] = melEnergy;
-        }
-
-        return true;
-    }
-
-    void Wav2LetterMFCC::ConvertToLogarithmicScale(
-                            std::vector<float>& melEnergies)
-    {
-        float maxMelEnergy = -FLT_MAX;
-
-        /* Container for natural logarithms of mel energies. */
-        std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
-
-        /* Because we are taking natural logs, we need to multiply by log10(e).
-         * Also, for wav2letter model, we scale our log10 values by 10. */
-        constexpr float multiplier = 10.0 *  /* Default scalar. */
-                                      0.4342944819032518;  /* log10f(std::exp(1.0))*/
-
-        /* Take log of the whole vector. */
-        math::MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
-
-        /* Scale the log values and get the max. */
-        for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
-                  iterM != melEnergies.end() && iterL != vecLogEnergies.end(); ++iterM, ++iterL) {
-
-            *iterM = *iterL * multiplier;
-
-            /* Save the max mel energy. */
-            if (*iterM > maxMelEnergy) {
-                maxMelEnergy = *iterM;
-            }
-        }
-
-        /* Clamp the mel energies. */
-        constexpr float maxDb = 80.0;
-        const float clampLevelLowdB = maxMelEnergy - maxDb;
-        for (float & melEnergie : melEnergies) {
-            melEnergie = std::max(melEnergie, clampLevelLowdB);
-        }
-    }
-
-    std::vector<float> Wav2LetterMFCC::CreateDCTMatrix(
-                                        const int32_t inputLength,
-                                        const int32_t coefficientCount)
-    {
-        std::vector<float> dctMatix(inputLength * coefficientCount);
-
-        /* Orthonormal normalization. */
-        const float normalizerK0 = 2 * math::MathUtils::SqrtF32(1.0f /
-                                        static_cast<float>(4*inputLength));
-        const float normalizer = 2 * math::MathUtils::SqrtF32(1.0f /
-                                        static_cast<float>(2*inputLength));
-
-        const float angleIncr = M_PI/inputLength;
-        float angle = angleIncr;  /* We start using it at k = 1 loop. */
-
-        /* First row of DCT will use normalizer K0 */
-        for (int32_t n = 0; n < inputLength; ++n) {
-            dctMatix[n] = normalizerK0  /* cos(0) = 1 */;
-        }
-
-        /* Second row (index = 1) onwards, we use standard normalizer. */
-        for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength) {
-            for (int32_t n = 0; n < inputLength; ++n) {
-                dctMatix[m+n] = normalizer *
-                    math::MathUtils::CosineF32((n + 0.5f) * angle);
-            }
-            angle += angleIncr;
-        }
-        return dctMatix;
-    }
-
-    float Wav2LetterMFCC::GetMelFilterBankNormaliser(
-                                    const float&    leftMel,
-                                    const float&    rightMel,
-                                    const bool      useHTKMethod)
-    {
-        /* Slaney normalization for mel weights. */
-        return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
-                MFCC::InverseMelScale(leftMel, useHTKMethod)));
-    }
-
-} /* namespace audio */
-} /* namespace app */
-} /* namespace arm */
diff --git a/source/use_case/kws_asr/src/Wav2LetterModel.cc b/source/use_case/kws_asr/src/Wav2LetterModel.cc
deleted file mode 100644
index 52bd23a..0000000
--- a/source/use_case/kws_asr/src/Wav2LetterModel.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "Wav2LetterModel.hpp"
-#include "log_macros.h"
-
-namespace arm {
-namespace app {
-namespace asr {
-    extern uint8_t* GetModelPointer();
-    extern size_t GetModelLen();
-}
-} /* namespace app */
-} /* namespace arm */
-
-const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
-{
-    return this->m_opResolver;
-}
-
-bool arm::app::Wav2LetterModel::EnlistOperations()
-{
-    this->m_opResolver.AddConv2D();
-    this->m_opResolver.AddLeakyRelu();
-    this->m_opResolver.AddSoftmax();
-    this->m_opResolver.AddReshape();
-
-#if defined(ARM_NPU)
-    if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
-        info("Added %s support to op resolver\n",
-            tflite::GetString_ETHOSU());
-    } else {
-        printf_err("Failed to add Arm NPU support to op resolver.");
-        return false;
-    }
-#endif /* ARM_NPU */
-    return true;
-}
-
-const uint8_t* arm::app::Wav2LetterModel::ModelPointer()
-{
-    return arm::app::asr::GetModelPointer();
-}
-
-size_t arm::app::Wav2LetterModel::ModelSize()
-{
-    return arm::app::asr::GetModelLen();
-}
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/Wav2LetterPostprocess.cc b/source/use_case/kws_asr/src/Wav2LetterPostprocess.cc
deleted file mode 100644
index 42f434e..0000000
--- a/source/use_case/kws_asr/src/Wav2LetterPostprocess.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "Wav2LetterPostprocess.hpp"
-
-#include "Wav2LetterModel.hpp"
-#include "log_macros.h"
-
-#include <cmath>
-
-namespace arm {
-namespace app {
-
-    AsrPostProcess::AsrPostProcess(TfLiteTensor* outputTensor, AsrClassifier& classifier,
-            const std::vector<std::string>& labels, std::vector<ClassificationResult>& results,
-            const uint32_t outputContextLen,
-            const uint32_t blankTokenIdx, const uint32_t reductionAxisIdx
-            ):
-            m_classifier(classifier),
-            m_outputTensor(outputTensor),
-            m_labels{labels},
-            m_results(results),
-            m_outputContextLen(outputContextLen),
-            m_countIterations(0),
-            m_blankTokenIdx(blankTokenIdx),
-            m_reductionAxisIdx(reductionAxisIdx)
-    {
-        this->m_outputInnerLen = AsrPostProcess::GetOutputInnerLen(this->m_outputTensor, this->m_outputContextLen);
-        this->m_totalLen = (2 * this->m_outputContextLen + this->m_outputInnerLen);
-    }
-
-    bool AsrPostProcess::DoPostProcess()
-    {
-        /* Basic checks. */
-        if (!this->IsInputValid(this->m_outputTensor, this->m_reductionAxisIdx)) {
-            return false;
-        }
-
-        /* Irrespective of tensor type, we use unsigned "byte" */
-        auto* ptrData = tflite::GetTensorData<uint8_t>(this->m_outputTensor);
-        const uint32_t elemSz = AsrPostProcess::GetTensorElementSize(this->m_outputTensor);
-
-        /* Other sanity checks. */
-        if (0 == elemSz) {
-            printf_err("Tensor type not supported for post processing\n");
-            return false;
-        } else if (elemSz * this->m_totalLen > this->m_outputTensor->bytes) {
-            printf_err("Insufficient number of tensor bytes\n");
-            return false;
-        }
-
-        /* Which axis do we need to process? */
-        switch (this->m_reductionAxisIdx) {
-            case Wav2LetterModel::ms_outputRowsIdx:
-                this->EraseSectionsRowWise(
-                        ptrData, elemSz * this->m_outputTensor->dims->data[Wav2LetterModel::ms_outputColsIdx],
-                        this->m_lastIteration);
-                break;
-            default:
-                printf_err("Unsupported axis index: %" PRIu32 "\n", this->m_reductionAxisIdx);
-                return false;
-        }
-        this->m_classifier.GetClassificationResults(this->m_outputTensor,
-                this->m_results, this->m_labels, 1);
-
-        return true;
-    }
-
-    bool AsrPostProcess::IsInputValid(TfLiteTensor* tensor, const uint32_t axisIdx) const
-    {
-        if (nullptr == tensor) {
-            return false;
-        }
-
-        if (static_cast<int>(axisIdx) >= tensor->dims->size) {
-            printf_err("Invalid axis index: %" PRIu32 "; Max: %d\n",
-                axisIdx, tensor->dims->size);
-            return false;
-        }
-
-        if (static_cast<int>(this->m_totalLen) !=
-                             tensor->dims->data[axisIdx]) {
-            printf_err("Unexpected tensor dimension for axis %d, got %d, \n",
-                axisIdx, tensor->dims->data[axisIdx]);
-            return false;
-        }
-
-        return true;
-    }
-
-    uint32_t AsrPostProcess::GetTensorElementSize(TfLiteTensor* tensor)
-    {
-        switch(tensor->type) {
-            case kTfLiteUInt8:
-            case kTfLiteInt8:
-                return 1;
-            case kTfLiteInt16:
-                return 2;
-            case kTfLiteInt32:
-            case kTfLiteFloat32:
-                return 4;
-            default:
-                printf_err("Unsupported tensor type %s\n",
-                    TfLiteTypeGetName(tensor->type));
-        }
-
-        return 0;
-    }
-
-    bool AsrPostProcess::EraseSectionsRowWise(
-            uint8_t*         ptrData,
-            const uint32_t   strideSzBytes,
-            const bool       lastIteration)
-    {
-        /* In this case, the "zero-ing" is quite simple as the region
-         * to be zeroed sits in contiguous memory (row-major). */
-        const uint32_t eraseLen = strideSzBytes * this->m_outputContextLen;
-
-        /* Erase left context? */
-        if (this->m_countIterations > 0) {
-            /* Set output of each classification window to the blank token. */
-            std::memset(ptrData, 0, eraseLen);
-            for (size_t windowIdx = 0; windowIdx < this->m_outputContextLen; windowIdx++) {
-                ptrData[windowIdx*strideSzBytes + this->m_blankTokenIdx] = 1;
-            }
-        }
-
-        /* Erase right context? */
-        if (false == lastIteration) {
-            uint8_t* rightCtxPtr = ptrData + (strideSzBytes * (this->m_outputContextLen + this->m_outputInnerLen));
-            /* Set output of each classification window to the blank token. */
-            std::memset(rightCtxPtr, 0, eraseLen);
-            for (size_t windowIdx = 0; windowIdx < this->m_outputContextLen; windowIdx++) {
-                rightCtxPtr[windowIdx*strideSzBytes + this->m_blankTokenIdx] = 1;
-            }
-        }
-
-        if (lastIteration) {
-            this->m_countIterations = 0;
-        } else {
-            ++this->m_countIterations;
-        }
-
-        return true;
-    }
-
-    uint32_t AsrPostProcess::GetNumFeatureVectors(const Model& model)
-    {
-        TfLiteTensor* inputTensor = model.GetInputTensor(0);
-        const int inputRows = std::max(inputTensor->dims->data[Wav2LetterModel::ms_inputRowsIdx], 0);
-        if (inputRows == 0) {
-            printf_err("Error getting number of input rows for axis: %" PRIu32 "\n",
-                    Wav2LetterModel::ms_inputRowsIdx);
-        }
-        return inputRows;
-    }
-
-    uint32_t AsrPostProcess::GetOutputInnerLen(const TfLiteTensor* outputTensor, const uint32_t outputCtxLen)
-    {
-        const uint32_t outputRows = std::max(outputTensor->dims->data[Wav2LetterModel::ms_outputRowsIdx], 0);
-        if (outputRows == 0) {
-            printf_err("Error getting number of output rows for axis: %" PRIu32 "\n",
-                    Wav2LetterModel::ms_outputRowsIdx);
-        }
-
-        /* Watching for underflow. */
-        int innerLen = (outputRows - (2 * outputCtxLen));
-
-        return std::max(innerLen, 0);
-    }
-
-    uint32_t AsrPostProcess::GetOutputContextLen(const Model& model, const uint32_t inputCtxLen)
-    {
-        const uint32_t inputRows = AsrPostProcess::GetNumFeatureVectors(model);
-        const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);
-        constexpr uint32_t ms_outputRowsIdx = Wav2LetterModel::ms_outputRowsIdx;
-
-        /* Check to make sure that the input tensor supports the above
-         * context and inner lengths. */
-        if (inputRows <= 2 * inputCtxLen || inputRows <= inputInnerLen) {
-            printf_err("Input rows not compatible with ctx of %" PRIu32 "\n",
-                       inputCtxLen);
-            return 0;
-        }
-
-        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
-        const uint32_t outputRows = std::max(outputTensor->dims->data[ms_outputRowsIdx], 0);
-        if (outputRows == 0) {
-            printf_err("Error getting number of output rows for axis: %" PRIu32 "\n",
-                       Wav2LetterModel::ms_outputRowsIdx);
-            return 0;
-        }
-
-        const float inOutRowRatio = static_cast<float>(inputRows) /
-                                     static_cast<float>(outputRows);
-
-        return std::round(static_cast<float>(inputCtxLen) / inOutRowRatio);
-    }
-
-} /* namespace app */
-} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/Wav2LetterPreprocess.cc b/source/use_case/kws_asr/src/Wav2LetterPreprocess.cc
deleted file mode 100644
index 92b0631..0000000
--- a/source/use_case/kws_asr/src/Wav2LetterPreprocess.cc
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "Wav2LetterPreprocess.hpp"
-
-#include "PlatformMath.hpp"
-#include "TensorFlowLiteMicro.hpp"
-
-#include <algorithm>
-#include <cmath>
-
-namespace arm {
-namespace app {
-
-    AsrPreProcess::AsrPreProcess(TfLiteTensor* inputTensor, const uint32_t numMfccFeatures,
-                                 const uint32_t numFeatureFrames, const uint32_t mfccWindowLen,
-                                 const uint32_t mfccWindowStride
-            ):
-            m_mfcc(numMfccFeatures, mfccWindowLen),
-            m_inputTensor(inputTensor),
-            m_mfccBuf(numMfccFeatures, numFeatureFrames),
-            m_delta1Buf(numMfccFeatures, numFeatureFrames),
-            m_delta2Buf(numMfccFeatures, numFeatureFrames),
-            m_mfccWindowLen(mfccWindowLen),
-            m_mfccWindowStride(mfccWindowStride),
-            m_numMfccFeats(numMfccFeatures),
-            m_numFeatureFrames(numFeatureFrames)
-    {
-        if (numMfccFeatures > 0 && mfccWindowLen > 0) {
-            this->m_mfcc.Init();
-        }
-    }
-
-    bool AsrPreProcess::DoPreProcess(const void* audioData, const size_t audioDataLen)
-    {
-        this->m_mfccSlidingWindow = audio::SlidingWindow<const int16_t>(
-                static_cast<const int16_t*>(audioData), audioDataLen,
-                this->m_mfccWindowLen, this->m_mfccWindowStride);
-
-        uint32_t mfccBufIdx = 0;
-
-        std::fill(m_mfccBuf.begin(), m_mfccBuf.end(), 0.f);
-        std::fill(m_delta1Buf.begin(), m_delta1Buf.end(), 0.f);
-        std::fill(m_delta2Buf.begin(), m_delta2Buf.end(), 0.f);
-
-        /* While we can slide over the audio. */
-        while (this->m_mfccSlidingWindow.HasNext()) {
-            const int16_t* mfccWindow = this->m_mfccSlidingWindow.Next();
-            auto mfccAudioData = std::vector<int16_t>(
-                                        mfccWindow,
-                                        mfccWindow + this->m_mfccWindowLen);
-            auto mfcc = this->m_mfcc.MfccCompute(mfccAudioData);
-            for (size_t i = 0; i < this->m_mfccBuf.size(0); ++i) {
-                this->m_mfccBuf(i, mfccBufIdx) = mfcc[i];
-            }
-            ++mfccBufIdx;
-        }
-
-        /* Pad MFCC if needed by adding MFCC for zeros. */
-        if (mfccBufIdx != this->m_numFeatureFrames) {
-            std::vector<int16_t> zerosWindow = std::vector<int16_t>(this->m_mfccWindowLen, 0);
-            std::vector<float> mfccZeros = this->m_mfcc.MfccCompute(zerosWindow);
-
-            while (mfccBufIdx != this->m_numFeatureFrames) {
-                memcpy(&this->m_mfccBuf(0, mfccBufIdx),
-                       mfccZeros.data(), sizeof(float) * m_numMfccFeats);
-                ++mfccBufIdx;
-            }
-        }
-
-        /* Compute first and second order deltas from MFCCs. */
-        AsrPreProcess::ComputeDeltas(this->m_mfccBuf, this->m_delta1Buf, this->m_delta2Buf);
-
-        /* Standardize calculated features. */
-        this->Standarize();
-
-        /* Quantise. */
-        QuantParams quantParams = GetTensorQuantParams(this->m_inputTensor);
-
-        if (0 == quantParams.scale) {
-            printf_err("Quantisation scale can't be 0\n");
-            return false;
-        }
-
-        switch(this->m_inputTensor->type) {
-            case kTfLiteUInt8:
-                return this->Quantise<uint8_t>(
-                        tflite::GetTensorData<uint8_t>(this->m_inputTensor), this->m_inputTensor->bytes,
-                        quantParams.scale, quantParams.offset);
-            case kTfLiteInt8:
-                return this->Quantise<int8_t>(
-                        tflite::GetTensorData<int8_t>(this->m_inputTensor), this->m_inputTensor->bytes,
-                        quantParams.scale, quantParams.offset);
-            default:
-                printf_err("Unsupported tensor type %s\n",
-                    TfLiteTypeGetName(this->m_inputTensor->type));
-        }
-
-        return false;
-    }
-
-    bool AsrPreProcess::ComputeDeltas(Array2d<float>& mfcc,
-                                      Array2d<float>& delta1,
-                                      Array2d<float>& delta2)
-    {
-        const std::vector <float> delta1Coeffs =
-            {6.66666667e-02,  5.00000000e-02,  3.33333333e-02,
-             1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
-            -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
-
-        const std::vector <float> delta2Coeffs =
-            {0.06060606,      0.01515152,     -0.01731602,
-            -0.03679654,     -0.04329004,     -0.03679654,
-            -0.01731602,      0.01515152,      0.06060606};
-
-        if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
-            mfcc.size(0) == 0 || mfcc.size(1) == 0) {
-            return false;
-        }
-
-        /* Get the middle index; coeff vec len should always be odd. */
-        const size_t coeffLen = delta1Coeffs.size();
-        const size_t fMidIdx = (coeffLen - 1)/2;
-        const size_t numFeatures = mfcc.size(0);
-        const size_t numFeatVectors = mfcc.size(1);
-
-        /* Iterate through features in MFCC vector. */
-        for (size_t i = 0; i < numFeatures; ++i) {
-            /* For each feature, iterate through time (t) samples representing feature evolution and
-             * calculate d/dt and d^2/dt^2, using 1D convolution with differential kernels.
-             * Convolution padding = valid, result size is `time length - kernel length + 1`.
-             * The result is padded with 0 from both sides to match the size of initial time samples data.
-             *
-             * For the small filter, conv1D implementation as a simple loop is efficient enough.
-             * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
-             */
-
-            for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j) {
-                float d1 = 0;
-                float d2 = 0;
-                const size_t mfccStIdx = j - fMidIdx;
-
-                for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m) {
-
-                    d1 +=  mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
-                    d2 +=  mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
-                }
-
-                delta1(i,j) = d1;
-                delta2(i,j) = d2;
-            }
-        }
-
-        return true;
-    }
-
-    void AsrPreProcess::StandardizeVecF32(Array2d<float>& vec)
-    {
-        auto mean = math::MathUtils::MeanF32(vec.begin(), vec.totalSize());
-        auto stddev = math::MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
-
-        debug("Mean: %f, Stddev: %f\n", mean, stddev);
-        if (stddev == 0) {
-            std::fill(vec.begin(), vec.end(), 0);
-        } else {
-            const float stddevInv = 1.f/stddev;
-            const float normalisedMean = mean/stddev;
-
-            auto NormalisingFunction = [=](float& value) {
-                value = value * stddevInv - normalisedMean;
-            };
-            std::for_each(vec.begin(), vec.end(), NormalisingFunction);
-        }
-    }
-
-    void AsrPreProcess::Standarize()
-    {
-        AsrPreProcess::StandardizeVecF32(this->m_mfccBuf);
-        AsrPreProcess::StandardizeVecF32(this->m_delta1Buf);
-        AsrPreProcess::StandardizeVecF32(this->m_delta2Buf);
-    }
-
-    float AsrPreProcess::GetQuantElem(
-                const float     elem,
-                const float     quantScale,
-                const int       quantOffset,
-                const float     minVal,
-                const float     maxVal)
-    {
-        float val = std::round((elem/quantScale) + quantOffset);
-        return std::min<float>(std::max<float>(val, minVal), maxVal);
-    }
-
-} /* namespace app */
-} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/usecase.cmake b/source/use_case/kws_asr/usecase.cmake
index 40df4d7..59ef450 100644
--- a/source/use_case/kws_asr/usecase.cmake
+++ b/source/use_case/kws_asr/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the APIs to use for this use case
+list(APPEND ${use_case}_API_LIST "kws" "asr")
 
 USER_OPTION(${use_case}_FILE_PATH "Directory with WAV files, or path to a single WAV file, to use in the evaluation application."
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
@@ -145,4 +147,4 @@
         ${${use_case}_AUDIO_OFFSET}
         ${${use_case}_AUDIO_DURATION}
         ${${use_case}_AUDIO_RES_TYPE}
-        ${${use_case}_AUDIO_MIN_SAMPLES})
\ No newline at end of file
+        ${${use_case}_AUDIO_MIN_SAMPLES})
diff --git a/source/use_case/noise_reduction/src/MainLoop.cc b/source/use_case/noise_reduction/src/MainLoop.cc
index fd72127..4c74a48 100644
--- a/source/use_case/noise_reduction/src/MainLoop.cc
+++ b/source/use_case/noise_reduction/src/MainLoop.cc
@@ -18,7 +18,17 @@
 #include "UseCaseCommonUtils.hpp"   /* Utils functions. */
 #include "RNNoiseModel.hpp"         /* Model class for running inference. */
 #include "InputFiles.hpp"           /* For input audio clips. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
 
 enum opcodes
 {
@@ -62,10 +72,22 @@
     constexpr bool bUseMenu = NUMBER_OF_FILES > 1 ? true : false;
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     }
+
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
@@ -124,4 +146,4 @@
         }
     } while (executionSuccessful && bUseMenu);
     info("Main loop terminated.\n");
-}
\ No newline at end of file
+}
diff --git a/source/use_case/noise_reduction/usecase.cmake b/source/use_case/noise_reduction/usecase.cmake
index 8dfde58..0cd0761 100644
--- a/source/use_case/noise_reduction/usecase.cmake
+++ b/source/use_case/noise_reduction/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "noise_reduction")
 
 USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
     0x00200000
diff --git a/source/use_case/object_detection/src/MainLoop.cc b/source/use_case/object_detection/src/MainLoop.cc
index 4291164..d119501 100644
--- a/source/use_case/object_detection/src/MainLoop.cc
+++ b/source/use_case/object_detection/src/MainLoop.cc
@@ -19,7 +19,17 @@
 #include "YoloFastestModel.hpp"       /* Model class for running inference. */
 #include "UseCaseHandler.hpp"         /* Handlers for different user options. */
 #include "UseCaseCommonUtils.hpp"     /* Utils functions. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
 
 static void DisplayDetectionMenu()
 {
@@ -40,11 +50,22 @@
     arm::app::YoloFastestModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
diff --git a/source/use_case/object_detection/usecase.cmake b/source/use_case/object_detection/usecase.cmake
index 42c4f2c..850e7fc 100644
--- a/source/use_case/object_detection/usecase.cmake
+++ b/source/use_case/object_detection/usecase.cmake
@@ -14,6 +14,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #----------------------------------------------------------------------------
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "object_detection")
 
 USER_OPTION(${use_case}_FILE_PATH "Directory with custom image files to use, or path to a single image, in the evaluation application"
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
diff --git a/source/use_case/vww/src/MainLoop.cc b/source/use_case/vww/src/MainLoop.cc
index 041ea18..2161b0a 100644
--- a/source/use_case/vww/src/MainLoop.cc
+++ b/source/use_case/vww/src/MainLoop.cc
@@ -21,7 +21,17 @@
 #include "VisualWakeWordModel.hpp" /* Model class for running inference. */
 #include "UseCaseHandler.hpp"       /* Handlers for different user options. */
 #include "UseCaseCommonUtils.hpp"   /* Utils functions. */
-#include "log_macros.h"
+#include "log_macros.h"             /* Logging functions */
+#include "BufAttributes.hpp"        /* Buffer attributes to be applied */
+
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
 
 using ViusalWakeWordClassifier = arm::app::Classifier;
 
@@ -30,11 +40,22 @@
     arm::app::VisualWakeWordModel model;  /* Model wrapper object. */
 
     /* Load the model. */
-    if (!model.Init()) {
+    if (!model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen())) {
         printf_err("Failed to initialise model\n");
         return;
     }
 
+#if !defined(ARM_NPU)
+    /* If it is not a NPU build check if the model contains a NPU operator */
+    if (model.ContainsEthosUOperator()) {
+        printf_err("No driver support for Ethos-U operator found in the model.\n");
+        return;
+    }
+#endif /* ARM_NPU */
+
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
 
@@ -55,7 +76,7 @@
     constexpr bool bUseMenu = NUMBER_OF_FILES > 1 ? true : false;
     do {
         int menuOption = common::MENU_OPT_RUN_INF_NEXT;
-        if (bUseMenu) { 
+        if (bUseMenu) {
             DisplayCommonMenu();
             menuOption = arm::app::ReadUserInputAsInt();
             printf("\n");
diff --git a/source/use_case/vww/usecase.cmake b/source/use_case/vww/usecase.cmake
index 8bf55fc..f6a3efe 100644
--- a/source/use_case/vww/usecase.cmake
+++ b/source/use_case/vww/usecase.cmake
@@ -1,3 +1,4 @@
+#----------------------------------------------------------------------------
 #  Copyright (c) 2021 Arm Limited. All rights reserved.
 #  SPDX-License-Identifier: Apache-2.0
 #
@@ -12,7 +13,10 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
+#----------------------------------------------------------------------------
 
+# Append the API to use for this use case
+list(APPEND ${use_case}_API_LIST "vww")
 
 USER_OPTION(${use_case}_FILE_PATH "Directory with custom image files, or path to a single image file, to use in the evaluation application"
     ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
index 6a1813f..d837617 100644
--- a/tests/use_case/ad/InferenceTestAD.cc
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -22,11 +22,21 @@
 #include "TestData_ad.hpp"
 #include "log_macros.h"
 #include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
 
 #ifndef AD_FEATURE_VEC_DATA_SIZE
 #define AD_IN_FEATURE_VEC_DATA_SIZE (1024)
 #endif /* AD_FEATURE_VEC_DATA_SIZE */
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 using namespace test;
 
 bool RunInference(arm::app::Model& model, const int8_t vec[])
@@ -84,7 +94,10 @@
     arm::app::AdModel model{};
 
     REQUIRE_FALSE(model.IsInited());
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
     REQUIRE(model.IsInited());
 
     REQUIRE(RunInferenceRandom(model));
@@ -102,11 +115,14 @@
             arm::app::AdModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
 
         }
     }
-}
\ No newline at end of file
+}
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
index 53c92ab..643f805 100644
--- a/tests/use_case/asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -17,10 +17,21 @@
 #include "TensorFlowLiteMicro.hpp"
 #include "Wav2LetterModel.hpp"
 #include "TestData_asr.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 #include <random>
 
+namespace arm {
+namespace app {
+    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    namespace asr {
+        extern uint8_t* GetModelPointer();
+        extern size_t GetModelLen();
+    } /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
+
 using namespace test;
 
 bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
@@ -58,7 +69,10 @@
     arm::app::Wav2LetterModel model{};
 
     REQUIRE_FALSE(model.IsInited());
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen()));
     REQUIRE(model.IsInited());
 
     REQUIRE(RunInferenceRandom(model));
@@ -96,11 +110,14 @@
             arm::app::Wav2LetterModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
 
         }
     }
-}
\ No newline at end of file
+}
diff --git a/tests/use_case/asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
index 11c4919..9c3d658 100644
--- a/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
+++ b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
@@ -17,11 +17,23 @@
 #include "Wav2LetterPostprocess.hpp"
 #include "Wav2LetterModel.hpp"
 #include "ClassificationResult.hpp"
+#include "BufAttributes.hpp"
 
 #include <algorithm>
 #include <catch.hpp>
 #include <limits>
 
+namespace arm {
+namespace app {
+    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+
+    namespace asr {
+        extern uint8_t* GetModelPointer();
+        extern size_t GetModelLen();
+    } /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
+
 template <typename T>
 static TfLiteTensor GetTestTensor(
                         std::vector<int>&      shape,
@@ -51,7 +63,10 @@
         const uint32_t outputCtxLen = 5;
         arm::app::AsrClassifier classifier;
         arm::app::Wav2LetterModel model;
-        model.Init();
+        model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen());
         std::vector<std::string> dummyLabels = {"a", "b", "$"};
         const uint32_t blankTokenIdx = 2;
         std::vector<arm::app::ClassificationResult> dummyResult;
@@ -71,7 +86,10 @@
         const uint32_t outputCtxLen = 5;
         arm::app::AsrClassifier classifier;
         arm::app::Wav2LetterModel model;
-        model.Init();
+        model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen());
         std::vector<std::string> dummyLabels = {"a", "b", "$"};
         const uint32_t blankTokenIdx = 2;
         std::vector<arm::app::ClassificationResult> dummyResult;
@@ -102,7 +120,10 @@
     std::vector<int> tensorShape = {1, 1, nRows, nCols};
     arm::app::AsrClassifier classifier;
     arm::app::Wav2LetterModel model;
-    model.Init();
+    model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen());
     std::vector<std::string> dummyLabels = {"a", "b", "$"};
     std::vector<arm::app::ClassificationResult> dummyResult;
 
diff --git a/tests/use_case/img_class/ImgClassificationUCTest.cc b/tests/use_case/img_class/ImgClassificationUCTest.cc
index b9caf61..d8339b6 100644
--- a/tests/use_case/img_class/ImgClassificationUCTest.cc
+++ b/tests/use_case/img_class/ImgClassificationUCTest.cc
@@ -24,13 +24,25 @@
 
 #include <catch.hpp>
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 TEST_CASE("Model info")
 {
     /* Model wrapper object. */
     arm::app::MobileNetModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -50,7 +62,10 @@
     arm::app::MobileNetModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -83,7 +98,10 @@
     arm::app::MobileNetModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -112,11 +130,14 @@
     arm::app::MobileNetModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
     caseContext.Set<arm::app::Model&>("model", model);
 
     REQUIRE(arm::app::ListFilesHandler(caseContext));
-}
\ No newline at end of file
+}
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index 7e7508b..30ce19f 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -18,9 +18,19 @@
 #include "MobileNetModel.hpp"
 #include "TensorFlowLiteMicro.hpp"
 #include "TestData_img_class.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 using namespace test;
 
 bool RunInference(arm::app::Model& model, const int8_t imageData[])
@@ -67,7 +77,10 @@
         arm::app::MobileNetModel model{};
 
         REQUIRE_FALSE(model.IsInited());
-        REQUIRE(model.Init());
+        REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
         REQUIRE(model.IsInited());
 
         for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
@@ -81,7 +94,10 @@
             arm::app::MobileNetModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<uint8_t>(i, model, 1);
diff --git a/tests/use_case/kws/InferenceTestMicroNetKws.cc b/tests/use_case/kws/InferenceTestMicroNetKws.cc
index 41ecc3c..a6f7a03 100644
--- a/tests/use_case/kws/InferenceTestMicroNetKws.cc
+++ b/tests/use_case/kws/InferenceTestMicroNetKws.cc
@@ -17,10 +17,22 @@
 #include "MicroNetKwsModel.hpp"
 #include "TestData_kws.hpp"
 #include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 #include <random>
 
+namespace arm {
+namespace app {
+    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+
+    namespace kws {
+        extern uint8_t *GetModelPointer();
+        extern size_t GetModelLen();
+    } /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
+
 using namespace test;
 
 bool RunInference(arm::app::Model& model, const int8_t vec[])
@@ -78,7 +90,10 @@
     arm::app::MicroNetKwsModel model{};
 
     REQUIRE_FALSE(model.IsInited());
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
     REQUIRE(model.IsInited());
 
     REQUIRE(RunInferenceRandom(model));
@@ -96,7 +111,10 @@
             arm::app::MicroNetKwsModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
diff --git a/tests/use_case/kws/KWSHandlerTest.cc b/tests/use_case/kws/KWSHandlerTest.cc
index c24faa4..d9d00a8 100644
--- a/tests/use_case/kws/KWSHandlerTest.cc
+++ b/tests/use_case/kws/KWSHandlerTest.cc
@@ -24,13 +24,26 @@
 #include "Classifier.hpp"
 #include "UseCaseCommonUtils.hpp"
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+        namespace kws {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace kws */
+    } /* namespace app */
+} /* namespace arm */
+
 TEST_CASE("Model info")
 {
     /* Model wrapper object. */
     arm::app::MicroNetKwsModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -50,7 +63,10 @@
     arm::app::MicroNetKwsModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -58,8 +74,8 @@
     arm::app::Profiler profiler{"kws"};
     caseContext.Set<arm::app::Profiler&>("profiler", profiler);
     caseContext.Set<arm::app::Model&>("model", model);
-    caseContext.Set<int>("frameLength", g_FrameLength);  /* 640 sample length for MicroNetKws. */
-    caseContext.Set<int>("frameStride", g_FrameStride);  /* 320 sample stride for MicroNetKws. */
+    caseContext.Set<int>("frameLength", arm::app::kws::g_FrameLength);  /* 640 sample length for MicroNetKws. */
+    caseContext.Set<int>("frameStride", arm::app::kws::g_FrameStride);  /* 320 sample stride for MicroNetKws. */
     caseContext.Set<float>("scoreThreshold", 0.5);       /* Normalised score threshold. */
 
     arm::app::Classifier classifier;                     /* classifier wrapper object. */
@@ -122,7 +138,10 @@
     arm::app::MicroNetKwsModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -131,8 +150,8 @@
     caseContext.Set<arm::app::Profiler&>("profiler", profiler);
     caseContext.Set<arm::app::Model&>("model", model);
     caseContext.Set<uint32_t>("clipIndex", 0);
-    caseContext.Set<int>("frameLength", g_FrameLength);  /* 640 sample length for MicroNet. */
-    caseContext.Set<int>("frameStride", g_FrameStride);  /* 320 sample stride for MicroNet. */
+    caseContext.Set<int>("frameLength", arm::app::kws::g_FrameLength);  /* 640 sample length for MicroNet. */
+    caseContext.Set<int>("frameStride", arm::app::kws::g_FrameStride);  /* 320 sample stride for MicroNet. */
     caseContext.Set<float>("scoreThreshold", 0.7);       /* Normalised score threshold. */
     arm::app::Classifier classifier;                     /* classifier wrapper object. */
     caseContext.Set<arm::app::Classifier&>("classifier", classifier);
@@ -153,7 +172,10 @@
     arm::app::MicroNetKwsModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -161,4 +183,4 @@
     caseContext.Set<arm::app::Model&>("model", model);
 
     REQUIRE(arm::app::ListFilesHandler(caseContext));
-}
\ No newline at end of file
+}
diff --git a/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc b/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
index a493021..4ba4693 100644
--- a/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
+++ b/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
@@ -17,10 +17,21 @@
 #include "MicroNetKwsModel.hpp"
 #include "TestData_kws.hpp"
 #include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 #include <random>
 
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+        namespace kws {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace kws */
+    } /* namespace app */
+} /* namespace arm */
+
 namespace test {
 namespace kws {
 
@@ -75,7 +86,10 @@
     arm::app::MicroNetKwsModel model{};
 
     REQUIRE_FALSE(model.IsInited());
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
     REQUIRE(model.IsInited());
 
     REQUIRE(RunInferenceRandom(model));
@@ -91,7 +105,10 @@
             arm::app::MicroNetKwsModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::kws::GetModelPointer(),
+                    arm::app::kws::GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
@@ -101,4 +118,4 @@
 }
 
 } //namespace
-} //namespace
\ No newline at end of file
+} //namespace
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 1c5f20a..5d30211 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -17,10 +17,22 @@
 #include "TensorFlowLiteMicro.hpp"
 #include "Wav2LetterModel.hpp"
 #include "TestData_asr.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 #include <random>
 
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+
+        namespace asr {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace asr */
+    } /* namespace app */
+} /* namespace arm */
+
 namespace test {
 namespace asr {
 
@@ -59,7 +71,10 @@
     arm::app::Wav2LetterModel model{};
 
     REQUIRE_FALSE(model.IsInited());
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                       sizeof(arm::app::tensorArena),
+                       arm::app::asr::GetModelPointer(),
+                       arm::app::asr::GetModelLen()));
     REQUIRE(model.IsInited());
 
     REQUIRE(RunInferenceRandom(model));
@@ -98,7 +113,10 @@
             arm::app::Wav2LetterModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
diff --git a/tests/use_case/kws_asr/InitModels.cc b/tests/use_case/kws_asr/InitModels.cc
index 97aa092..85841a3 100644
--- a/tests/use_case/kws_asr/InitModels.cc
+++ b/tests/use_case/kws_asr/InitModels.cc
@@ -16,9 +16,25 @@
  */
 #include "MicroNetKwsModel.hpp"
 #include "Wav2LetterModel.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+
+        namespace asr {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace asr */
+        namespace kws {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace kws */
+    } /* namespace app */
+} /* namespace arm */
+
 /* Skip this test, Wav2LetterModel if not Vela optimized but only from ML-zoo will fail. */
 TEST_CASE("Init two Models", "[.]")
 {
@@ -35,13 +51,20 @@
     //arm::app::Wav2LetterModel model2;     /* model2. */
 
     /* Load/initialise the first model. */
-    REQUIRE(model1.Init());
+    REQUIRE(model1.Init(arm::app::tensorArena,
+                        sizeof(arm::app::tensorArena),
+                        arm::app::kws::GetModelPointer(),
+                        arm::app::kws::GetModelLen()));
 
     /* Allocator instance should have been created. */
     REQUIRE(nullptr != model1.GetAllocator());
 
     /* Load the second model using the same allocator as model 1. */
-    REQUIRE(model2.Init(model1.GetAllocator()));
+    REQUIRE(model2.Init(arm::app::tensorArena,
+                        sizeof(arm::app::tensorArena),
+                        arm::app::asr::GetModelPointer(),
+                        arm::app::asr::GetModelLen(),
+                        model1.GetAllocator()));
 
     /* Make sure they point to the same allocator object. */
     REQUIRE(model1.GetAllocator() == model2.GetAllocator());
@@ -49,4 +72,4 @@
     /* Both models should report being initialised. */
     REQUIRE(true == model1.IsInited());
     REQUIRE(true == model2.IsInited());
-}
\ No newline at end of file
+}
diff --git a/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
index e343b66..d2071ea 100644
--- a/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
+++ b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
@@ -17,11 +17,27 @@
 #include "Wav2LetterPostprocess.hpp"
 #include "Wav2LetterModel.hpp"
 #include "ClassificationResult.hpp"
+#include "BufAttributes.hpp"
 
 #include <algorithm>
 #include <catch.hpp>
 #include <limits>
 
+namespace arm {
+    namespace app {
+        static uint8_t  tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+
+        namespace asr {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace asr */
+        namespace kws {
+            extern uint8_t* GetModelPointer();
+            extern size_t GetModelLen();
+        } /* namespace kws */
+    } /* namespace app */
+} /* namespace arm */
+
 template <typename T>
 static TfLiteTensor GetTestTensor(
         std::vector<int>&      shape,
@@ -51,7 +67,10 @@
         const uint32_t outputCtxLen = 5;
         arm::app::AsrClassifier classifier;
         arm::app::Wav2LetterModel model;
-        model.Init();
+        model.Init(arm::app::tensorArena,
+                   sizeof(arm::app::tensorArena),
+                   arm::app::asr::GetModelPointer(),
+                   arm::app::asr::GetModelLen());
         std::vector<std::string> dummyLabels = {"a", "b", "$"};
         const uint32_t blankTokenIdx = 2;
         std::vector<arm::app::ClassificationResult> dummyResult;
@@ -71,7 +90,10 @@
         const uint32_t outputCtxLen = 5;
         arm::app::AsrClassifier classifier;
         arm::app::Wav2LetterModel model;
-        model.Init();
+        model.Init(arm::app::tensorArena,
+                   sizeof(arm::app::tensorArena),
+                   arm::app::asr::GetModelPointer(),
+                   arm::app::asr::GetModelLen());
         std::vector<std::string> dummyLabels = {"a", "b", "$"};
         const uint32_t blankTokenIdx = 2;
         std::vector<arm::app::ClassificationResult> dummyResult;
@@ -102,7 +124,10 @@
     std::vector<int> tensorShape = {1, 1, nRows, nCols};
     arm::app::AsrClassifier classifier;
     arm::app::Wav2LetterModel model;
-    model.Init();
+    model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    arm::app::asr::GetModelPointer(),
+                    arm::app::asr::GetModelLen());
     std::vector<std::string> dummyLabels = {"a", "b", "$"};
     std::vector<arm::app::ClassificationResult> dummyResult;
 
diff --git a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
index 4c9786f..9dc640b 100644
--- a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
+++ b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
@@ -17,10 +17,20 @@
 #include "TensorFlowLiteMicro.hpp"
 #include "RNNoiseModel.hpp"
 #include "TestData_noise_reduction.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 #include <random>
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 namespace test {
 namespace rnnoise {
 
@@ -62,7 +72,10 @@
         arm::app::RNNoiseModel model{};
 
         REQUIRE_FALSE(model.IsInited());
-        REQUIRE(model.Init());
+        REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
         REQUIRE(model.IsInited());
 
         REQUIRE(RunInferenceRandom(model));
@@ -121,7 +134,10 @@
             arm::app::RNNoiseModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInference<int8_t>(goldenInputFV, goldenOutputFV, model);
diff --git a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
index cc1b4d7..bebfdfd 100644
--- a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
+++ b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
@@ -24,6 +24,15 @@
 #include <hal.h>
 #include <Profiler.hpp>
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 #define PLATFORM    hal_platform_init();
 
 #define CONTEXT \
@@ -38,7 +47,10 @@
     std::vector<uint8_t> memPool(maxMemDumpSz); /* Memory pool */
     arm::app::RNNoiseModel model{};
 
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
     REQUIRE(model.IsInited());
 
     /* Populate the output tensors */
@@ -105,7 +117,10 @@
     caseContext.Set<uint32_t>("frameStride", g_FrameStride);
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
 }
@@ -136,7 +151,10 @@
     caseContext.Set<uint32_t>("frameStride", g_FrameStride);
     caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     size_t oneInferenceOutSizeBytes = g_FrameLength * sizeof(int16_t);
 
diff --git a/tests/use_case/noise_reduction/RNNoiseModelTests.cc b/tests/use_case/noise_reduction/RNNoiseModelTests.cc
index 7798975..9720ba5 100644
--- a/tests/use_case/noise_reduction/RNNoiseModelTests.cc
+++ b/tests/use_case/noise_reduction/RNNoiseModelTests.cc
@@ -17,10 +17,20 @@
 #include "RNNoiseModel.hpp"
 #include "TensorFlowLiteMicro.hpp"
 #include "TestData_noise_reduction.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 #include <random>
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 bool RunInference(arm::app::Model& model, std::vector<int8_t> vec,
                     const size_t sizeRequired, const size_t dataInputIndex)
 {
@@ -61,7 +71,10 @@
     arm::app::RNNoiseModel model{};
 
     REQUIRE_FALSE(model.IsInited());
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                       sizeof(arm::app::tensorArena),
+                       GetModelPointer(),
+                       GetModelLen()));
     REQUIRE(model.IsInited());
 
     model.ResetGruState();
@@ -114,7 +127,10 @@
 TEST_CASE("Test initial GRU out state is 0", "[RNNoise]")
 {
     TestRNNoiseModel model{};
-    model.Init();
+    model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen());
 
     auto map = model.GetStateMap();
 
@@ -135,7 +151,10 @@
 TEST_CASE("Test GRU state copy", "[RNNoise]")
 {
     TestRNNoiseModel model{};
-    model.Init();
+    model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen());
     REQUIRE(RunInferenceRandom(model, 0));
 
     auto map = model.GetStateMap();
@@ -162,4 +181,4 @@
         statesIter++;
     }
 
-}
\ No newline at end of file
+}
diff --git a/tests/use_case/object_detection/InferenceTestYoloFastest.cc b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
index 2c035e7..1b4d1dd 100644
--- a/tests/use_case/object_detection/InferenceTestYoloFastest.cc
+++ b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
@@ -22,6 +22,15 @@
 #include "InputFiles.hpp"
 #include "UseCaseCommonUtils.hpp"
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 #include <catch.hpp>
 
 void GetExpectedResults(std::vector<std::vector<arm::app::object_detection::DetectionResult>> &expected_results)
@@ -122,7 +131,10 @@
         arm::app::YoloFastestModel model{};
 
         REQUIRE_FALSE(model.IsInited());
-        REQUIRE(model.Init());
+        REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
         REQUIRE(model.IsInited());
 
         for (uint32_t i = 0 ; i < NUMBER_OF_FILES; ++i) {
@@ -136,7 +148,10 @@
             arm::app::YoloFastestModel model{};
 
             REQUIRE_FALSE(model.IsInited());
-            REQUIRE(model.Init());
+            REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
             REQUIRE(model.IsInited());
 
             TestInferenceDetectionResults<uint8_t>(i, model, 1);
diff --git a/tests/use_case/object_detection/ObjectDetectionUCTest.cc b/tests/use_case/object_detection/ObjectDetectionUCTest.cc
index 023b893..ffb4976 100644
--- a/tests/use_case/object_detection/ObjectDetectionUCTest.cc
+++ b/tests/use_case/object_detection/ObjectDetectionUCTest.cc
@@ -20,16 +20,29 @@
 #include "YoloFastestModel.hpp"
 #include "UseCaseHandler.hpp"
 #include "UseCaseCommonUtils.hpp"
+#include "BufAttributes.hpp"
 
 #include <catch.hpp>
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 TEST_CASE("Model info")
 {
     /* Model wrapper object. */
     arm::app::YoloFastestModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -49,7 +62,10 @@
     arm::app::YoloFastestModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -72,7 +88,10 @@
     arm::app::YoloFastestModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
@@ -95,7 +114,10 @@
     arm::app::YoloFastestModel model;
 
     /* Load the model. */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context. */
     arm::app::ApplicationContext caseContext;
diff --git a/tests/use_case/vww/VisualWakeWordUCTests.cc b/tests/use_case/vww/VisualWakeWordUCTests.cc
index 531764b..05a31a4 100644
--- a/tests/use_case/vww/VisualWakeWordUCTests.cc
+++ b/tests/use_case/vww/VisualWakeWordUCTests.cc
@@ -24,12 +24,24 @@
 #include "Classifier.hpp"
 #include "UseCaseCommonUtils.hpp"
 
+namespace arm {
+    namespace app {
+        static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+    } /* namespace app */
+} /* namespace arm */
+
+extern uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
 TEST_CASE("Model info")
 {
     arm::app::VisualWakeWordModel model;    /* model wrapper object */
 
     /* Load the model */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context */
     arm::app::ApplicationContext caseContext;
@@ -46,7 +58,10 @@
     arm::app::VisualWakeWordModel model;    /* model wrapper object */
 
     /* Load the model */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context */
     arm::app::ApplicationContext caseContext;
@@ -76,7 +91,10 @@
     arm::app::VisualWakeWordModel model;    /* model wrapper object */
 
     /* Load the model */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context */
     arm::app::ApplicationContext caseContext;
@@ -102,7 +120,10 @@
     arm::app::VisualWakeWordModel model;    /* model wrapper object */
 
     /* Load the model */
-    REQUIRE(model.Init());
+    REQUIRE(model.Init(arm::app::tensorArena,
+                    sizeof(arm::app::tensorArena),
+                    GetModelPointer(),
+                    GetModelLen()));
 
     /* Instantiate application context */
     arm::app::ApplicationContext caseContext;
@@ -110,4 +131,4 @@
     caseContext.Set<arm::app::Model&>("model", model);
 
     REQUIRE(arm::app::ListFilesHandler(caseContext));
-}
\ No newline at end of file
+}